From aa0e2a2e70a3519a9dcb9b1da000a13c01aa6cc1 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 29 Nov 2018 16:28:43 +0100 Subject: [PATCH 001/634] Make constant primops lazy --- src/libexpr/eval.cc | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 2a194d0e0..3891e8666 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -439,14 +439,21 @@ Value * EvalState::addConstant(const string & name, Value & v) Value * EvalState::addPrimOp(const string & name, size_t arity, PrimOpFun primOp) { + auto name2 = string(name, 0, 2) == "__" ? string(name, 2) : name; + Symbol sym = symbols.create(name2); + + /* Hack to make constants lazy: turn them into a application of + the primop to a dummy value. */ if (arity == 0) { + auto vPrimOp = allocValue(); + vPrimOp->type = tPrimOp; + vPrimOp->primOp = new PrimOp(primOp, 1, sym); Value v; - primOp(*this, noPos, nullptr, v); + mkApp(v, *vPrimOp, *vPrimOp); return addConstant(name, v); } + Value * v = allocValue(); - string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name; - Symbol sym = symbols.create(name2); v->type = tPrimOp; v->primOp = new PrimOp(primOp, arity, sym); staticBaseEnv.vars[symbols.create(name)] = baseEnvDispl; From 15a16e5c05d547ec07170df2392263e5e891447b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 22 Nov 2018 15:59:52 +0100 Subject: [PATCH 002/634] MultiCommand: Simplify construction --- src/nix/command.cc | 8 +++++--- src/nix/command.hh | 8 ++++---- src/nix/main.cc | 7 ++++++- 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/src/nix/command.cc b/src/nix/command.cc index 3d7d582d6..e760c17d5 100644 --- a/src/nix/command.cc +++ b/src/nix/command.cc @@ -4,7 +4,7 @@ namespace nix { -Commands * RegisterCommand::commands = 0; +std::vector> * RegisterCommand::commands = 0; void Command::printHelp(const string & programName, std::ostream & out) { @@ -21,9 +21,11 @@ void Command::printHelp(const string & programName, std::ostream & out) } } -MultiCommand::MultiCommand(const Commands & _commands) - : commands(_commands) +MultiCommand::MultiCommand(const std::vector> & _commands) { + for (auto & command : _commands) + commands.emplace(command->name(), command); + expectedArgs.push_back(ExpectedArg{"command", 1, true, [=](std::vector ss) { assert(!command); auto i = commands.find(ss[0]); diff --git a/src/nix/command.hh b/src/nix/command.hh index 97a6fee7f..2108aa674 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -173,7 +173,7 @@ public: std::shared_ptr command; - MultiCommand(const Commands & commands); + MultiCommand(const std::vector> & commands); void printHelp(const string & programName, std::ostream & out) override; @@ -185,12 +185,12 @@ public: /* A helper class for registering commands globally. 
*/ struct RegisterCommand { - static Commands * commands; + static std::vector> * commands; RegisterCommand(ref command) { - if (!commands) commands = new Commands; - commands->emplace(command->name(), command); + if (!commands) commands = new std::vector>; + commands->push_back(command); } }; diff --git a/src/nix/main.cc b/src/nix/main.cc index 64c1dc357..3d4348d28 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -57,10 +57,15 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs "--help-config' for a list of configuration settings.\n"; } + void printHelp(const string & programName, std::ostream & out) + { + MultiCommand::printHelp(programName, out); + std::cout << "\nNote: this program is EXPERIMENTAL and subject to change.\n"; + } + void showHelpAndExit() { printHelp(programName, std::cout); - std::cout << "\nNote: this program is EXPERIMENTAL and subject to change.\n"; throw Exit(); } }; From f70434b1fbbdb0e188718f0c55a8156a7aa08744 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 22 Nov 2018 16:03:31 +0100 Subject: [PATCH 003/634] Move Command and MultiCommand to libutil --- src/libutil/args.cc | 69 ++++++++++++++++++++++++++++++++++++++++++ src/libutil/args.hh | 41 +++++++++++++++++++++++++ src/nix/command.cc | 74 --------------------------------------------- src/nix/command.hh | 41 ------------------------- src/nix/main.cc | 5 +++ 5 files changed, 115 insertions(+), 115 deletions(-) diff --git a/src/libutil/args.cc b/src/libutil/args.cc index 7af2a1bf7..2837dacc9 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -200,4 +200,73 @@ void printTable(std::ostream & out, const Table2 & table) } } +void Command::printHelp(const string & programName, std::ostream & out) +{ + Args::printHelp(programName, out); + + auto exs = examples(); + if (!exs.empty()) { + out << "\n"; + out << "Examples:\n"; + for (auto & ex : exs) + out << "\n" + << " " << ex.description << "\n" // FIXME: wrap + << " $ " << ex.command << "\n"; + } +} + +MultiCommand::MultiCommand(const std::vector> & _commands) +{ + for (auto & command : _commands) + commands.emplace(command->name(), command); + + expectedArgs.push_back(ExpectedArg{"command", 1, true, [=](std::vector ss) { + assert(!command); + auto i = commands.find(ss[0]); + if (i == commands.end()) + throw UsageError("'%s' is not a recognised command", ss[0]); + command = i->second; + }}); +} + +void MultiCommand::printHelp(const string & programName, std::ostream & out) +{ + if (command) { + command->printHelp(programName + " " + command->name(), out); + return; + } + + out << "Usage: " << programName << " ... 
...\n"; + + out << "\n"; + out << "Common flags:\n"; + printFlags(out); + + out << "\n"; + out << "Available commands:\n"; + + Table2 table; + for (auto & command : commands) { + auto descr = command.second->description(); + if (!descr.empty()) + table.push_back(std::make_pair(command.second->name(), descr)); + } + printTable(out, table); +} + +bool MultiCommand::processFlag(Strings::iterator & pos, Strings::iterator end) +{ + if (Args::processFlag(pos, end)) return true; + if (command && command->processFlag(pos, end)) return true; + return false; +} + +bool MultiCommand::processArgs(const Strings & args, bool finish) +{ + if (command) + return command->processArgs(args, finish); + else + return Args::processArgs(args, finish); +} + } diff --git a/src/libutil/args.hh b/src/libutil/args.hh index ad5fcca39..bf69bf4b6 100644 --- a/src/libutil/args.hh +++ b/src/libutil/args.hh @@ -188,6 +188,47 @@ public: friend class MultiCommand; }; +/* A command is an argument parser that can be executed by calling its + run() method. */ +struct Command : virtual Args +{ + virtual std::string name() = 0; + virtual void prepare() { }; + virtual void run() = 0; + + struct Example + { + std::string description; + std::string command; + }; + + typedef std::list Examples; + + virtual Examples examples() { return Examples(); } + + void printHelp(const string & programName, std::ostream & out) override; +}; + +typedef std::map> Commands; + +/* An argument parser that supports multiple subcommands, + i.e. ‘ ’. */ +class MultiCommand : virtual Args +{ +public: + Commands commands; + + std::shared_ptr command; + + MultiCommand(const std::vector> & commands); + + void printHelp(const string & programName, std::ostream & out) override; + + bool processFlag(Strings::iterator & pos, Strings::iterator end) override; + + bool processArgs(const Strings & args, bool finish) override; +}; + Strings argvToStrings(int argc, char * * argv); /* Helper function for rendering argument labels. */ diff --git a/src/nix/command.cc b/src/nix/command.cc index e760c17d5..5967ab36c 100644 --- a/src/nix/command.cc +++ b/src/nix/command.cc @@ -6,80 +6,6 @@ namespace nix { std::vector> * RegisterCommand::commands = 0; -void Command::printHelp(const string & programName, std::ostream & out) -{ - Args::printHelp(programName, out); - - auto exs = examples(); - if (!exs.empty()) { - out << "\n"; - out << "Examples:\n"; - for (auto & ex : exs) - out << "\n" - << " " << ex.description << "\n" // FIXME: wrap - << " $ " << ex.command << "\n"; - } -} - -MultiCommand::MultiCommand(const std::vector> & _commands) -{ - for (auto & command : _commands) - commands.emplace(command->name(), command); - - expectedArgs.push_back(ExpectedArg{"command", 1, true, [=](std::vector ss) { - assert(!command); - auto i = commands.find(ss[0]); - if (i == commands.end()) - throw UsageError("'%s' is not a recognised command", ss[0]); - command = i->second; - }}); -} - -void MultiCommand::printHelp(const string & programName, std::ostream & out) -{ - if (command) { - command->printHelp(programName + " " + command->name(), out); - return; - } - - out << "Usage: " << programName << " ... 
...\n"; - - out << "\n"; - out << "Common flags:\n"; - printFlags(out); - - out << "\n"; - out << "Available commands:\n"; - - Table2 table; - for (auto & command : commands) { - auto descr = command.second->description(); - if (!descr.empty()) - table.push_back(std::make_pair(command.second->name(), descr)); - } - printTable(out, table); - -#if 0 - out << "\n"; - out << "For full documentation, run 'man " << programName << "' or 'man " << programName << "-'.\n"; -#endif -} - -bool MultiCommand::processFlag(Strings::iterator & pos, Strings::iterator end) -{ - if (Args::processFlag(pos, end)) return true; - if (command && command->processFlag(pos, end)) return true; - return false; -} - -bool MultiCommand::processArgs(const Strings & args, bool finish) -{ - if (command) - return command->processArgs(args, finish); - else - return Args::processArgs(args, finish); -} - StoreCommand::StoreCommand() { } diff --git a/src/nix/command.hh b/src/nix/command.hh index 2108aa674..04183c7ed 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -11,27 +11,6 @@ struct Value; class Bindings; class EvalState; -/* A command is an argument parser that can be executed by calling its - run() method. */ -struct Command : virtual Args -{ - virtual std::string name() = 0; - virtual void prepare() { }; - virtual void run() = 0; - - struct Example - { - std::string description; - std::string command; - }; - - typedef std::list Examples; - - virtual Examples examples() { return Examples(); } - - void printHelp(const string & programName, std::ostream & out) override; -}; - class Store; /* A command that require a Nix store. */ @@ -162,26 +141,6 @@ struct StorePathCommand : public InstallablesCommand void run(ref store) override; }; -typedef std::map> Commands; - -/* An argument parser that supports multiple subcommands, - i.e. ‘ ’. */ -class MultiCommand : virtual Args -{ -public: - Commands commands; - - std::shared_ptr command; - - MultiCommand(const std::vector> & commands); - - void printHelp(const string & programName, std::ostream & out) override; - - bool processFlag(Strings::iterator & pos, Strings::iterator end) override; - - bool processArgs(const Strings & args, bool finish) override; -}; - /* A helper class for registering commands globally. 
*/ struct RegisterCommand { diff --git a/src/nix/main.cc b/src/nix/main.cc index 3d4348d28..4b909736d 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -60,6 +60,11 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs void printHelp(const string & programName, std::ostream & out) { MultiCommand::printHelp(programName, out); + +#if 0 + out << "\nFor full documentation, run 'man " << programName << "' or 'man " << programName << "-'.\n"; +#endif + std::cout << "\nNote: this program is EXPERIMENTAL and subject to change.\n"; } From c02da997570ac0d9b595d787bea8cb5a4e3cc1f5 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 26 Nov 2018 19:55:48 +0100 Subject: [PATCH 004/634] EvalState::allocAttr(): Add convenience method --- src/libexpr/attr-set.cc | 6 ++++++ src/libexpr/eval.hh | 1 + 2 files changed, 7 insertions(+) diff --git a/src/libexpr/attr-set.cc b/src/libexpr/attr-set.cc index 0785897d2..b1d61a285 100644 --- a/src/libexpr/attr-set.cc +++ b/src/libexpr/attr-set.cc @@ -43,6 +43,12 @@ Value * EvalState::allocAttr(Value & vAttrs, const Symbol & name) } +Value * EvalState::allocAttr(Value & vAttrs, const std::string & name) +{ + return allocAttr(vAttrs, symbols.create(name)); +} + + void Bindings::sort() { std::sort(begin(), end()); diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index d0f298e16..60cf0f87f 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -264,6 +264,7 @@ public: Env & allocEnv(size_t size); Value * allocAttr(Value & vAttrs, const Symbol & name); + Value * allocAttr(Value & vAttrs, const std::string & name); Bindings * allocBindings(size_t capacity); From f216c76c56cdffb5214d074a7d44812843dd174f Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 26 Nov 2018 19:57:20 +0100 Subject: [PATCH 005/634] Bindings::get(): Add convenience method This allows writing attribute lookups as if (auto name = value.attrs->get(state.sName)) ... --- configure.ac | 2 +- src/libexpr/attr-set.hh | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/configure.ac b/configure.ac index 5a2526672..b550231cb 100644 --- a/configure.ac +++ b/configure.ac @@ -62,7 +62,7 @@ CXXFLAGS= AC_PROG_CC AC_PROG_CXX AC_PROG_CPP -AX_CXX_COMPILE_STDCXX_14 +AX_CXX_COMPILE_STDCXX_17 # Use 64-bit file system calls so that we can support files > 2 GiB. 
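(A sketch for illustration, not part of this patch: the lookup pattern described in the commit message above, written out against the Bindings::get() added in the attr-set.hh hunk just below. It mirrors how the flake code later in this series consumes the method; `state` is an EvalState, `vInfo` an attribute-set Value and `flake` a struct with an `id` field, all borrowed from that later code.)

    // Sketch only: get() returns a std::optional<Attr *> -- empty when the
    // attribute is absent, otherwise a pointer into the sorted Bindings array.
    if (auto name = vInfo.attrs->get(state.sName)) {
        // Dereference the optional and then the Attr pointer to reach the
        // attribute's value and its source position.
        flake.id = state.forceStringNoCtx(*(**name).value, *(**name).pos);
    } else
        throw Error("flake lacks attribute 'name'");
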
diff --git a/src/libexpr/attr-set.hh b/src/libexpr/attr-set.hh index 3119a1848..6c5fb21ad 100644 --- a/src/libexpr/attr-set.hh +++ b/src/libexpr/attr-set.hh @@ -4,6 +4,7 @@ #include "symbol-table.hh" #include +#include namespace nix { @@ -63,6 +64,14 @@ public: return end(); } + std::optional get(const Symbol & name) + { + Attr key(name, 0); + iterator i = std::lower_bound(begin(), end(), key); + if (i != end() && i->name == name) return &*i; + return {}; + } + iterator begin() { return &attrs[0]; } iterator end() { return &attrs[size_]; } From 7a5cf31060289de61370643937277b5d0d5d178c Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 29 Nov 2018 19:18:36 +0100 Subject: [PATCH 006/634] Initial flake support --- corepkgs/default-installation-source.nix | 3 + corepkgs/local.mk | 8 +- src/libexpr/eval.cc | 1 + src/libexpr/eval.hh | 20 ++- src/libexpr/primops/fetchGit.cc | 9 +- src/libexpr/primops/fetchGit.hh | 23 ++++ src/libexpr/primops/flake.cc | 161 +++++++++++++++++++++++ src/nix/flake.cc | 65 +++++++++ src/nix/installables.cc | 39 +----- 9 files changed, 282 insertions(+), 47 deletions(-) create mode 100644 corepkgs/default-installation-source.nix create mode 100644 src/libexpr/primops/fetchGit.hh create mode 100644 src/libexpr/primops/flake.cc create mode 100644 src/nix/flake.cc diff --git a/corepkgs/default-installation-source.nix b/corepkgs/default-installation-source.nix new file mode 100644 index 000000000..71ba04452 --- /dev/null +++ b/corepkgs/default-installation-source.nix @@ -0,0 +1,3 @@ +builtins.mapAttrs (flakeName: flakeInfo: + (getFlake flakeInfo.uri).${flakeName}.provides.packages or {}) + builtins.flakeRegistry diff --git a/corepkgs/local.mk b/corepkgs/local.mk index 362c8eb61..41aaec63b 100644 --- a/corepkgs/local.mk +++ b/corepkgs/local.mk @@ -1,4 +1,10 @@ -corepkgs_FILES = buildenv.nix unpack-channel.nix derivation.nix fetchurl.nix imported-drv-to-derivation.nix +corepkgs_FILES = \ + buildenv.nix \ + unpack-channel.nix \ + derivation.nix \ + fetchurl.nix \ + imported-drv-to-derivation.nix \ + default-installation-source.nix $(foreach file,config.nix $(corepkgs_FILES),$(eval $(call install-data-in,$(d)/$(file),$(datadir)/nix/corepkgs))) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 3891e8666..e3a264277 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -290,6 +290,7 @@ EvalState::EvalState(const Strings & _searchPath, ref store) , sOutputHash(symbols.create("outputHash")) , sOutputHashAlgo(symbols.create("outputHashAlgo")) , sOutputHashMode(symbols.create("outputHashMode")) + , sDescription(symbols.create("description")) , repair(NoRepair) , store(store) , baseEnv(allocEnv(128)) diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index 60cf0f87f..674b08f45 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -72,7 +72,8 @@ public: sSystem, sOverrides, sOutputs, sOutputName, sIgnoreNulls, sFile, sLine, sColumn, sFunctor, sToString, sRight, sWrong, sStructuredAttrs, sBuilder, sArgs, - sOutputHash, sOutputHashAlgo, sOutputHashMode; + sOutputHash, sOutputHashAlgo, sOutputHashMode, + sDescription; Symbol sDerivationNix; /* If set, force copying files to the Nix store even if they @@ -311,6 +312,23 @@ private: friend struct ExprOpConcatLists; friend struct ExprSelect; friend void prim_getAttr(EvalState & state, const Pos & pos, Value * * args, Value & v); + +public: + + struct FlakeRegistry + { + struct Entry + { + std::string uri; + }; + std::map entries; + }; + + const FlakeRegistry & getFlakeRegistry(); + +private: + 
std::unique_ptr _flakeRegistry; + std::once_flag _flakeRegistryInit; }; diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index b46d2f258..6b6ca08d1 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -1,3 +1,4 @@ +#include "fetchGit.hh" #include "primops.hh" #include "eval-inline.hh" #include "download.hh" @@ -15,14 +16,6 @@ using namespace std::string_literals; namespace nix { -struct GitInfo -{ - Path storePath; - std::string rev; - std::string shortRev; - uint64_t revCount = 0; -}; - std::regex revRegex("^[0-9a-fA-F]{40}$"); GitInfo exportGit(ref store, const std::string & uri, diff --git a/src/libexpr/primops/fetchGit.hh b/src/libexpr/primops/fetchGit.hh new file mode 100644 index 000000000..23ab2fae9 --- /dev/null +++ b/src/libexpr/primops/fetchGit.hh @@ -0,0 +1,23 @@ +#pragma once + +#include "store-api.hh" + +#include + +namespace nix { + +struct GitInfo +{ + Path storePath; + std::string rev; + std::string shortRev; + uint64_t revCount = 0; +}; + +GitInfo exportGit(ref store, const std::string & uri, + std::experimental::optional ref, std::string rev, + const std::string & name); + +extern std::regex revRegex; + +} diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc new file mode 100644 index 000000000..457c30948 --- /dev/null +++ b/src/libexpr/primops/flake.cc @@ -0,0 +1,161 @@ +#include "primops.hh" +#include "eval-inline.hh" +#include "fetchGit.hh" +#include "download.hh" + +#include +#include + +namespace nix { + +const EvalState::FlakeRegistry & EvalState::getFlakeRegistry() +{ + std::call_once(_flakeRegistryInit, [&]() + { + _flakeRegistry = std::make_unique(); + + if (!evalSettings.pureEval) { + + auto registryUri = "file:///home/eelco/Dev/gists/nix-flakes/registry.json"; + + auto registryFile = getDownloader()->download(DownloadRequest(registryUri)); + + auto json = nlohmann::json::parse(*registryFile.data); + + auto version = json.value("version", 0); + if (version != 1) + throw Error("flake registry '%s' has unsupported version %d", registryUri, version); + + auto flakes = json["flakes"]; + for (auto i = flakes.begin(); i != flakes.end(); ++i) { + FlakeRegistry::Entry entry; + entry.uri = i->value("uri", ""); + if (entry.uri.empty()) + throw Error("invalid flake registry entry"); + _flakeRegistry->entries.emplace(i.key(), entry); + } + } + }); + + return *_flakeRegistry; +} + +static void prim_flakeRegistry(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + auto registry = state.getFlakeRegistry(); + + state.mkAttrs(v, registry.entries.size()); + + for (auto & entry : registry.entries) { + auto vEntry = state.allocAttr(v, entry.first); + state.mkAttrs(*vEntry, 2); + mkString(*state.allocAttr(*vEntry, state.symbols.create("uri")), entry.second.uri); + vEntry->attrs->sort(); + } + + v.attrs->sort(); +} + +static RegisterPrimOp r1("__flakeRegistry", 0, prim_flakeRegistry); + +struct Flake +{ + std::string name; + std::string description; + Path path; + std::set requires; + Value * vProvides; // FIXME: gc +}; + +static Flake fetchFlake(EvalState & state, const std::string & flakeUri) +{ + Flake flake; + + auto gitInfo = exportGit(state.store, flakeUri, {}, "", "source"); + + state.store->assertStorePath(gitInfo.storePath); + + Value vInfo; + state.evalFile(gitInfo.storePath + "/flake.nix", vInfo); + + state.forceAttrs(vInfo); + + if (auto name = vInfo.attrs->get(state.sName)) + flake.name = state.forceStringNoCtx(*(**name).value, *(**name).pos); + else + throw Error("flake 
lacks attribute 'name'"); + + if (auto description = vInfo.attrs->get(state.sDescription)) + flake.description = state.forceStringNoCtx(*(**description).value, *(**description).pos); + + if (auto requires = vInfo.attrs->get(state.symbols.create("requires"))) { + state.forceList(*(**requires).value, *(**requires).pos); + for (unsigned int n = 0; n < (**requires).value->listSize(); ++n) + flake.requires.insert(state.forceStringNoCtx( + *(**requires).value->listElems()[n], *(**requires).pos)); + } + + if (auto provides = vInfo.attrs->get(state.symbols.create("provides"))) { + state.forceFunction(*(**provides).value, *(**provides).pos); + flake.vProvides = (**provides).value; + } else + throw Error("flake lacks attribute 'provides'"); + + return flake; +} + +static std::map resolveFlakes(EvalState & state, const StringSet & flakeUris) +{ + auto registry = state.getFlakeRegistry(); + + std::map done; + std::queue todo; + for (auto & i : flakeUris) todo.push(i); + + while (!todo.empty()) { + auto flakeUri = todo.front(); + todo.pop(); + if (done.count(flakeUri)) continue; + + auto flake = fetchFlake(state, flakeUri); + + for (auto & require : flake.requires) { + auto i = registry.entries.find(require); + if (i == registry.entries.end()) + throw Error("unknown flake '%s'", require); + todo.push(i->second.uri); + } + + done.emplace(flake.name, flake); + } + + return done; +} + +static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + std::string flakeUri = state.forceStringNoCtx(*args[0], pos); + + auto flakes = resolveFlakes(state, {flakeUri}); + + auto vResult = state.allocValue(); + + state.mkAttrs(*vResult, flakes.size()); + + for (auto & flake : flakes) { + auto vFlake = state.allocAttr(*vResult, flake.second.name); + state.mkAttrs(*vFlake, 2); + mkString(*state.allocAttr(*vFlake, state.sDescription), flake.second.description); + auto vProvides = state.allocAttr(*vFlake, state.symbols.create("provides")); + mkApp(*vProvides, *flake.second.vProvides, *vResult); + vFlake->attrs->sort(); + } + + vResult->attrs->sort(); + + v = *vResult; +} + +static RegisterPrimOp r2("getFlake", 1, prim_getFlake); + +} diff --git a/src/nix/flake.cc b/src/nix/flake.cc new file mode 100644 index 000000000..98cd90c64 --- /dev/null +++ b/src/nix/flake.cc @@ -0,0 +1,65 @@ +#include "command.hh" +#include "common-args.hh" +#include "shared.hh" +#include "progress-bar.hh" +#include "eval.hh" + +using namespace nix; + +struct CmdFlakeList : StoreCommand, MixEvalArgs +{ + std::string name() override + { + return "list"; + } + + std::string description() override + { + return "list available Nix flakes"; + } + + void run(nix::ref store) override + { + auto evalState = std::make_shared(searchPath, store); + + auto registry = evalState->getFlakeRegistry(); + + stopProgressBar(); + + for (auto & entry : registry.entries) { + std::cout << entry.first << " " << entry.second.uri << "\n"; + } + } +}; + +struct CmdFlake : virtual MultiCommand, virtual Command +{ + CmdFlake() + : MultiCommand({make_ref()}) + { + } + + std::string name() override + { + return "flake"; + } + + std::string description() override + { + return "manage Nix flakes"; + } + + void run() override + { + if (!command) + throw UsageError("'nix flake' requires a sub-command."); + command->run(); + } + + void printHelp(const string & programName, std::ostream & out) override + { + MultiCommand::printHelp(programName, out); + } +}; + +static RegisterCommand r1(make_ref()); diff --git a/src/nix/installables.cc 
b/src/nix/installables.cc index 0c1ad3ab3..9b7b96c25 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -26,47 +26,12 @@ Value * SourceExprCommand::getSourceExpr(EvalState & state) { if (vSourceExpr) return vSourceExpr; - auto sToplevel = state.symbols.create("_toplevel"); - vSourceExpr = state.allocValue(); if (file != "") state.evalFile(lookupFileArg(state, file), *vSourceExpr); - - else { - - /* Construct the installation source from $NIX_PATH. */ - - auto searchPath = state.getSearchPath(); - - state.mkAttrs(*vSourceExpr, searchPath.size() + 1); - - mkBool(*state.allocAttr(*vSourceExpr, sToplevel), true); - - std::unordered_set seen; - - for (auto & i : searchPath) { - if (i.first == "") continue; - if (seen.count(i.first)) continue; - seen.insert(i.first); -#if 0 - auto res = state.resolveSearchPathElem(i); - if (!res.first) continue; - if (!pathExists(res.second)) continue; - mkApp(*state.allocAttr(*vSourceExpr, state.symbols.create(i.first)), - state.getBuiltin("import"), - mkString(*state.allocValue(), res.second)); -#endif - Value * v1 = state.allocValue(); - mkPrimOpApp(*v1, state.getBuiltin("findFile"), state.getBuiltin("nixPath")); - Value * v2 = state.allocValue(); - mkApp(*v2, *v1, mkString(*state.allocValue(), i.first)); - mkApp(*state.allocAttr(*vSourceExpr, state.symbols.create(i.first)), - state.getBuiltin("import"), *v2); - } - - vSourceExpr->attrs->sort(); - } + else + state.evalFile(lookupFileArg(state, ""), *vSourceExpr); return vSourceExpr; } From ef4cf4e681bfe30b15b8c2940b51b322bce5b6d8 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 30 Nov 2018 16:11:15 +0100 Subject: [PATCH 007/634] Introduce flake URIs --- src/libexpr/primops/flake.cc | 48 +++++++++++++++++++++++++----------- 1 file changed, 33 insertions(+), 15 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 457c30948..9dc6fa1f1 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -4,6 +4,7 @@ #include "download.hh" #include +#include #include namespace nix { @@ -67,16 +68,39 @@ struct Flake Value * vProvides; // FIXME: gc }; -static Flake fetchFlake(EvalState & state, const std::string & flakeUri) +std::regex flakeRegex("^flake:([a-zA-Z][a-zA-Z0-9_-]+)$"); + +static Path fetchFlake(EvalState & state, const std::string & flakeUri) { + std::smatch match; + + if (std::regex_match(flakeUri, match, flakeRegex)) { + auto flakeName = match[1]; + auto registry = state.getFlakeRegistry(); + auto i = registry.entries.find(flakeName); + if (i == registry.entries.end()) + throw Error("unknown flake '%s'", flakeName); + return fetchFlake(state, i->second.uri); + } + + else if (hasPrefix(flakeUri, "/") || hasPrefix(flakeUri, "git://")) { + auto gitInfo = exportGit(state.store, flakeUri, {}, "", "source"); + return gitInfo.storePath; + } + + else + throw Error("unsupported flake URI '%s'", flakeUri); +} + +static Flake getFlake(EvalState & state, const std::string & flakeUri) +{ + auto flakePath = fetchFlake(state, flakeUri); + state.store->assertStorePath(flakePath); + Flake flake; - auto gitInfo = exportGit(state.store, flakeUri, {}, "", "source"); - - state.store->assertStorePath(gitInfo.storePath); - Value vInfo; - state.evalFile(gitInfo.storePath + "/flake.nix", vInfo); + state.evalFile(flakePath + "/flake.nix", vInfo); state.forceAttrs(vInfo); @@ -106,8 +130,6 @@ static Flake fetchFlake(EvalState & state, const std::string & flakeUri) static std::map resolveFlakes(EvalState & state, const StringSet & flakeUris) { - auto 
registry = state.getFlakeRegistry(); - std::map done; std::queue todo; for (auto & i : flakeUris) todo.push(i); @@ -117,14 +139,10 @@ static std::map resolveFlakes(EvalState & state, const Strin todo.pop(); if (done.count(flakeUri)) continue; - auto flake = fetchFlake(state, flakeUri); + auto flake = getFlake(state, flakeUri); - for (auto & require : flake.requires) { - auto i = registry.entries.find(require); - if (i == registry.entries.end()) - throw Error("unknown flake '%s'", require); - todo.push(i->second.uri); - } + for (auto & require : flake.requires) + todo.push(require); done.emplace(flake.name, flake); } From dcae46ab146b735aa49fcf4cad4a320e79362c5e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 12 Dec 2018 13:20:59 +0100 Subject: [PATCH 008/634] Add github URIs For example, github:edolstra/dwarffs is more-or-less equivalent to https://github.com/edolstra/dwarffs.git. It's a much faster way to get GitHub repositories: it fetches tarballs rather than entire Git repositories. It also allows fetching specific revisions by hash without specifying a ref (e.g. a branch name): github:edolstra/dwarffs/41c0c1bf292ea3ac3858ff393b49ca1123dbd553 --- src/libexpr/primops/flake.cc | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 9dc6fa1f1..81e912402 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -68,7 +68,8 @@ struct Flake Value * vProvides; // FIXME: gc }; -std::regex flakeRegex("^flake:([a-zA-Z][a-zA-Z0-9_-]+)$"); +std::regex flakeRegex("^flake:([a-zA-Z][a-zA-Z0-9_-]*)(/[a-zA-Z][a-zA-Z0-9_.-]*)?$"); +std::regex githubRegex("^github:([a-zA-Z][a-zA-Z0-9_-]*)/([a-zA-Z][a-zA-Z0-9_-]*)(/([a-zA-Z][a-zA-Z0-9_-]*))?$"); static Path fetchFlake(EvalState & state, const std::string & flakeUri) { @@ -76,6 +77,7 @@ static Path fetchFlake(EvalState & state, const std::string & flakeUri) if (std::regex_match(flakeUri, match, flakeRegex)) { auto flakeName = match[1]; + auto revOrRef = match[2]; auto registry = state.getFlakeRegistry(); auto i = registry.entries.find(flakeName); if (i == registry.entries.end()) @@ -83,6 +85,25 @@ static Path fetchFlake(EvalState & state, const std::string & flakeUri) return fetchFlake(state, i->second.uri); } + else if (std::regex_match(flakeUri, match, githubRegex)) { + auto owner = match[1]; + auto repo = match[2]; + auto revOrRef = match[4].str(); + if (revOrRef.empty()) revOrRef = "master"; + + // FIXME: require hash in pure mode. + + // FIXME: use regular /archive URLs instead? api.github.com + // might have stricter rate limits. + auto storePath = getDownloader()->downloadCached(state.store, + fmt("https://api.github.com/repos/%s/%s/tarball/%s", owner, repo, revOrRef), + true, "source"); + + // FIXME: extract revision hash from ETag. + + return storePath; + } + else if (hasPrefix(flakeUri, "/") || hasPrefix(flakeUri, "git://")) { auto gitInfo = exportGit(state.store, flakeUri, {}, "", "source"); return gitInfo.storePath; From 52419f8db3ed4806f8114a98f94a68f3b249f065 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 11 Feb 2019 13:01:39 +0100 Subject: [PATCH 009/634] Add flake registry This will eventually be moved to nixos.org. 
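(A sketch for illustration, not part of this patch: how one entry of the registry file added below is consumed by the code from the earlier patches in this series. getFlakeRegistry() parses flake-registry.json into FlakeRegistry::Entry values, and fetchFlake() turns a github: URI into a cached tarball download. The literal owner/repo/branch values are taken from the registry contents below; the composition shown here is illustrative rather than an actual call path in the patch.)

    // Sketch: resolving the registered flake "dwarffs".
    auto registry = state.getFlakeRegistry();        // reads <datadir>/nix/flake-registry.json
    auto uri = registry.entries.at("dwarffs").uri;   // "github:edolstra/dwarffs/flake"
                                                     // (the .uri field is renamed to .ref later in this series)
    // fetchFlake() maps that URI to a GitHub tarball (see the github: patch above):
    auto storePath = getDownloader()->downloadCached(state.store,
        "https://api.github.com/repos/edolstra/dwarffs/tarball/flake",
        true, "source");
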
--- flake-registry.json | 11 +++++++++++ local.mk | 2 ++ src/libexpr/primops/flake.cc | 8 ++++++-- 3 files changed, 19 insertions(+), 2 deletions(-) create mode 100644 flake-registry.json diff --git a/flake-registry.json b/flake-registry.json new file mode 100644 index 000000000..b850daa74 --- /dev/null +++ b/flake-registry.json @@ -0,0 +1,11 @@ +{ + "version": 1, + "flakes": { + "dwarffs": { + "uri": "github:edolstra/dwarffs/flake" + }, + "nixpkgs": { + "uri": "github:edolstra/nixpkgs/flake" + } + } +} diff --git a/local.mk b/local.mk index 4b380176f..11ed9c0a6 100644 --- a/local.mk +++ b/local.mk @@ -10,3 +10,5 @@ GLOBAL_CXXFLAGS += -I . -I src -I src/libutil -I src/libstore -I src/libmain -I $(foreach i, config.h $(call rwildcard, src/lib*, *.hh), \ $(eval $(call install-file-in, $(i), $(includedir)/nix, 0644))) + +$(eval $(call install-data-in,$(d)/flake-registry.json,$(datadir)/nix)) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 81e912402..1367fa420 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -17,15 +17,19 @@ const EvalState::FlakeRegistry & EvalState::getFlakeRegistry() if (!evalSettings.pureEval) { +#if 0 auto registryUri = "file:///home/eelco/Dev/gists/nix-flakes/registry.json"; auto registryFile = getDownloader()->download(DownloadRequest(registryUri)); +#endif - auto json = nlohmann::json::parse(*registryFile.data); + auto registryFile = readFile(settings.nixDataDir + "/nix/flake-registry.json"); + + auto json = nlohmann::json::parse(registryFile); auto version = json.value("version", 0); if (version != 1) - throw Error("flake registry '%s' has unsupported version %d", registryUri, version); + throw Error("flake registry '%s' has unsupported version %d", registryFile, version); auto flakes = json["flakes"]; for (auto i = flakes.begin(); i != flakes.end(); ++i) { From c8a0b9d5cbfe6619f8b38118f5b1d1875d1c5309 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 12 Feb 2019 13:43:32 +0100 Subject: [PATCH 010/634] experimental/optional -> optional --- src/libexpr/eval.hh | 2 +- src/libexpr/get-drvs.cc | 2 +- src/libexpr/get-drvs.hh | 2 +- src/libexpr/primops.cc | 2 +- src/libexpr/primops/fetchGit.cc | 4 ++-- src/libexpr/primops/fetchGit.hh | 2 +- src/libstore/build.cc | 10 +++++----- src/libstore/parsed-derivations.cc | 4 ++-- src/libstore/parsed-derivations.hh | 8 ++++---- src/libstore/remote-store.hh | 2 +- src/libutil/lru-cache.hh | 4 ++-- src/libutil/serialise.cc | 2 +- src/libutil/util.cc | 2 +- src/libutil/util.hh | 6 +++--- src/nix/add-to-store.cc | 2 +- 15 files changed, 27 insertions(+), 27 deletions(-) diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index 674b08f45..c8ee63551 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -82,7 +82,7 @@ public: /* The allowed filesystem paths in restricted or pure evaluation mode. 
*/ - std::experimental::optional allowedPaths; + std::optional allowedPaths; Value vEmptySet; diff --git a/src/libexpr/get-drvs.cc b/src/libexpr/get-drvs.cc index d38ed2df3..21a4d7917 100644 --- a/src/libexpr/get-drvs.cc +++ b/src/libexpr/get-drvs.cc @@ -295,7 +295,7 @@ static bool getDerivation(EvalState & state, Value & v, } -std::experimental::optional getDerivation(EvalState & state, Value & v, +std::optional getDerivation(EvalState & state, Value & v, bool ignoreAssertionFailures) { Done done; diff --git a/src/libexpr/get-drvs.hh b/src/libexpr/get-drvs.hh index daaa635fe..d7860fc6a 100644 --- a/src/libexpr/get-drvs.hh +++ b/src/libexpr/get-drvs.hh @@ -78,7 +78,7 @@ typedef list DrvInfos; /* If value `v' denotes a derivation, return a DrvInfo object describing it. Otherwise return nothing. */ -std::experimental::optional getDerivation(EvalState & state, +std::optional getDerivation(EvalState & state, Value & v, bool ignoreAssertionFailures); void getDerivations(EvalState & state, Value & v, const string & pathPrefix, diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 60698f740..f787ad96b 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -555,7 +555,7 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * * PathSet context; - std::experimental::optional outputHash; + std::optional outputHash; std::string outputHashAlgo; bool outputHashRecursive = false; diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index 6b6ca08d1..3027e0f2d 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -19,7 +19,7 @@ namespace nix { std::regex revRegex("^[0-9a-fA-F]{40}$"); GitInfo exportGit(ref store, const std::string & uri, - std::experimental::optional ref, std::string rev, + std::optional ref, std::string rev, const std::string & name) { if (evalSettings.pureEval && rev == "") @@ -183,7 +183,7 @@ GitInfo exportGit(ref store, const std::string & uri, static void prim_fetchGit(EvalState & state, const Pos & pos, Value * * args, Value & v) { std::string url; - std::experimental::optional ref; + std::optional ref; std::string rev; std::string name = "source"; PathSet context; diff --git a/src/libexpr/primops/fetchGit.hh b/src/libexpr/primops/fetchGit.hh index 23ab2fae9..6031e09e1 100644 --- a/src/libexpr/primops/fetchGit.hh +++ b/src/libexpr/primops/fetchGit.hh @@ -15,7 +15,7 @@ struct GitInfo }; GitInfo exportGit(ref store, const std::string & uri, - std::experimental::optional ref, std::string rev, + std::optional ref, std::string rev, const std::string & name); extern std::regex revRegex; diff --git a/src/libstore/build.cc b/src/libstore/build.cc index 47ee8b48f..6b88b1307 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -2413,7 +2413,7 @@ void DerivationGoal::writeStructuredAttrs() objects consisting entirely of those values. (So nested arrays or objects are not supported.) 
*/ - auto handleSimpleType = [](const nlohmann::json & value) -> std::experimental::optional { + auto handleSimpleType = [](const nlohmann::json & value) -> std::optional { if (value.is_string()) return shellEscape(value); @@ -3311,8 +3311,8 @@ void DerivationGoal::checkOutputs(const std::map & outputs) struct Checks { bool ignoreSelfRefs = false; - std::experimental::optional maxSize, maxClosureSize; - std::experimental::optional allowedReferences, allowedRequisites, disallowedReferences, disallowedRequisites; + std::optional maxSize, maxClosureSize; + std::optional allowedReferences, allowedRequisites, disallowedReferences, disallowedRequisites; }; /* Compute the closure and closure size of some output. This @@ -3359,7 +3359,7 @@ void DerivationGoal::checkOutputs(const std::map & outputs) info.path, closureSize, *checks.maxClosureSize); } - auto checkRefs = [&](const std::experimental::optional & value, bool allowed, bool recursive) + auto checkRefs = [&](const std::optional & value, bool allowed, bool recursive) { if (!value) return; @@ -3413,7 +3413,7 @@ void DerivationGoal::checkOutputs(const std::map & outputs) if (maxClosureSize != output->end()) checks.maxClosureSize = maxClosureSize->get(); - auto get = [&](const std::string & name) -> std::experimental::optional { + auto get = [&](const std::string & name) -> std::optional { auto i = output->find(name); if (i != output->end()) { Strings res; diff --git a/src/libstore/parsed-derivations.cc b/src/libstore/parsed-derivations.cc index dc3286482..17fde00a0 100644 --- a/src/libstore/parsed-derivations.cc +++ b/src/libstore/parsed-derivations.cc @@ -16,7 +16,7 @@ ParsedDerivation::ParsedDerivation(const Path & drvPath, BasicDerivation & drv) } } -std::experimental::optional ParsedDerivation::getStringAttr(const std::string & name) const +std::optional ParsedDerivation::getStringAttr(const std::string & name) const { if (structuredAttrs) { auto i = structuredAttrs->find(name); @@ -56,7 +56,7 @@ bool ParsedDerivation::getBoolAttr(const std::string & name, bool def) const } } -std::experimental::optional ParsedDerivation::getStringsAttr(const std::string & name) const +std::optional ParsedDerivation::getStringsAttr(const std::string & name) const { if (structuredAttrs) { auto i = structuredAttrs->find(name); diff --git a/src/libstore/parsed-derivations.hh b/src/libstore/parsed-derivations.hh index 0a82c1461..ed07dc652 100644 --- a/src/libstore/parsed-derivations.hh +++ b/src/libstore/parsed-derivations.hh @@ -8,22 +8,22 @@ class ParsedDerivation { Path drvPath; BasicDerivation & drv; - std::experimental::optional structuredAttrs; + std::optional structuredAttrs; public: ParsedDerivation(const Path & drvPath, BasicDerivation & drv); - const std::experimental::optional & getStructuredAttrs() const + const std::optional & getStructuredAttrs() const { return structuredAttrs; } - std::experimental::optional getStringAttr(const std::string & name) const; + std::optional getStringAttr(const std::string & name) const; bool getBoolAttr(const std::string & name, bool def = false) const; - std::experimental::optional getStringsAttr(const std::string & name) const; + std::optional getStringsAttr(const std::string & name) const; StringSet getRequiredSystemFeatures() const; diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh index 4f554b598..919c6d819 100644 --- a/src/libstore/remote-store.hh +++ b/src/libstore/remote-store.hh @@ -149,7 +149,7 @@ public: private: ref openConnection() override; - std::experimental::optional path; + 
std::optional path; }; diff --git a/src/libutil/lru-cache.hh b/src/libutil/lru-cache.hh index 9b8290e63..8b83f842c 100644 --- a/src/libutil/lru-cache.hh +++ b/src/libutil/lru-cache.hh @@ -2,7 +2,7 @@ #include #include -#include +#include namespace nix { @@ -64,7 +64,7 @@ public: /* Look up an item in the cache. If it exists, it becomes the most recently used item. */ - std::experimental::optional get(const Key & key) + std::optional get(const Key & key) { auto i = data.find(key); if (i == data.end()) return {}; diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index 0e75eeec2..8201549fd 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -171,7 +171,7 @@ std::unique_ptr sinkToSource( std::function fun; std::function eof; - std::experimental::optional coro; + std::optional coro; bool started = false; SinkToSource(std::function fun, std::function eof) diff --git a/src/libutil/util.cc b/src/libutil/util.cc index 7eca35577..e3dcd246c 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -965,7 +965,7 @@ std::vector stringsToCharPtrs(const Strings & ss) string runProgram(Path program, bool searchPath, const Strings & args, - const std::experimental::optional & input) + const std::optional & input) { RunOptions opts(program, args); opts.searchPath = searchPath; diff --git a/src/libutil/util.hh b/src/libutil/util.hh index bda87bee4..9f239bff3 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #ifndef HAVE_STRUCT_DIRENT_D_TYPE @@ -259,14 +259,14 @@ pid_t startProcess(std::function fun, const ProcessOptions & options = P shell backtick operator). */ string runProgram(Path program, bool searchPath = false, const Strings & args = Strings(), - const std::experimental::optional & input = {}); + const std::optional & input = {}); struct RunOptions { Path program; bool searchPath = true; Strings args; - std::experimental::optional input; + std::optional input; Source * standardIn = nullptr; Sink * standardOut = nullptr; bool _killStderr = false; diff --git a/src/nix/add-to-store.cc b/src/nix/add-to-store.cc index d0003790d..e86b96e3f 100644 --- a/src/nix/add-to-store.cc +++ b/src/nix/add-to-store.cc @@ -8,7 +8,7 @@ using namespace nix; struct CmdAddToStore : MixDryRun, StoreCommand { Path path; - std::experimental::optional namePart; + std::optional namePart; CmdAddToStore() { From 0cd7f2cd8d99071ebfb06a8f0d6a18efed6cd42e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 12 Feb 2019 13:44:20 +0100 Subject: [PATCH 011/634] pkg-config files: Use c++17 --- src/libexpr/nix-expr.pc.in | 2 +- src/libmain/nix-main.pc.in | 2 +- src/libstore/nix-store.pc.in | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/libexpr/nix-expr.pc.in b/src/libexpr/nix-expr.pc.in index 79f3e2f45..80f7a492b 100644 --- a/src/libexpr/nix-expr.pc.in +++ b/src/libexpr/nix-expr.pc.in @@ -7,4 +7,4 @@ Description: Nix Package Manager Version: @PACKAGE_VERSION@ Requires: nix-store bdw-gc Libs: -L${libdir} -lnixexpr -Cflags: -I${includedir}/nix -std=c++14 +Cflags: -I${includedir}/nix -std=c++17 diff --git a/src/libmain/nix-main.pc.in b/src/libmain/nix-main.pc.in index 38bc85c48..37b03dcd4 100644 --- a/src/libmain/nix-main.pc.in +++ b/src/libmain/nix-main.pc.in @@ -6,4 +6,4 @@ Name: Nix Description: Nix Package Manager Version: @PACKAGE_VERSION@ Libs: -L${libdir} -lnixmain -Cflags: -I${includedir}/nix -std=c++14 +Cflags: -I${includedir}/nix -std=c++17 diff --git a/src/libstore/nix-store.pc.in 
b/src/libstore/nix-store.pc.in index 5cf22faad..6d67b1e03 100644 --- a/src/libstore/nix-store.pc.in +++ b/src/libstore/nix-store.pc.in @@ -6,4 +6,4 @@ Name: Nix Description: Nix Package Manager Version: @PACKAGE_VERSION@ Libs: -L${libdir} -lnixstore -lnixutil -Cflags: -I${includedir}/nix -std=c++14 +Cflags: -I${includedir}/nix -std=c++17 From 91a6a47b0e98f4114c263ef32895e749639c50ad Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 12 Feb 2019 18:23:11 +0100 Subject: [PATCH 012/634] Improve flake references --- src/libexpr/eval.cc | 1 + src/libexpr/eval.hh | 10 +- src/libexpr/primops/fetchGit.cc | 2 +- src/libexpr/primops/fetchGit.hh | 2 - src/libexpr/primops/flake.cc | 107 +++++++++++---------- src/libexpr/primops/flake.hh | 17 ++++ src/libexpr/primops/flakeref.cc | 139 ++++++++++++++++++++++++++++ src/libexpr/primops/flakeref.hh | 158 ++++++++++++++++++++++++++++++++ src/nix/flake.cc | 3 +- 9 files changed, 380 insertions(+), 59 deletions(-) create mode 100644 src/libexpr/primops/flake.hh create mode 100644 src/libexpr/primops/flakeref.cc create mode 100644 src/libexpr/primops/flakeref.hh diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index e3a264277..548eef31b 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -7,6 +7,7 @@ #include "eval-inline.hh" #include "download.hh" #include "json.hh" +#include "primops/flake.hh" #include #include diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index c8ee63551..35c01b97a 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -17,6 +17,7 @@ namespace nix { class Store; class EvalState; enum RepairFlag : bool; +struct FlakeRegistry; typedef void (* PrimOpFun) (EvalState & state, const Pos & pos, Value * * args, Value & v); @@ -315,15 +316,6 @@ private: public: - struct FlakeRegistry - { - struct Entry - { - std::string uri; - }; - std::map entries; - }; - const FlakeRegistry & getFlakeRegistry(); private: diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index 3027e0f2d..62e9dfc0e 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -16,7 +16,7 @@ using namespace std::string_literals; namespace nix { -std::regex revRegex("^[0-9a-fA-F]{40}$"); +extern std::regex revRegex; GitInfo exportGit(ref store, const std::string & uri, std::optional ref, std::string rev, diff --git a/src/libexpr/primops/fetchGit.hh b/src/libexpr/primops/fetchGit.hh index 6031e09e1..d7a0e165a 100644 --- a/src/libexpr/primops/fetchGit.hh +++ b/src/libexpr/primops/fetchGit.hh @@ -18,6 +18,4 @@ GitInfo exportGit(ref store, const std::string & uri, std::optional ref, std::string rev, const std::string & name); -extern std::regex revRegex; - } diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 1367fa420..5e92b1da3 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -1,3 +1,4 @@ +#include "flake.hh" #include "primops.hh" #include "eval-inline.hh" #include "fetchGit.hh" @@ -9,7 +10,7 @@ namespace nix { -const EvalState::FlakeRegistry & EvalState::getFlakeRegistry() +const FlakeRegistry & EvalState::getFlakeRegistry() { std::call_once(_flakeRegistryInit, [&]() { @@ -33,10 +34,7 @@ const EvalState::FlakeRegistry & EvalState::getFlakeRegistry() auto flakes = json["flakes"]; for (auto i = flakes.begin(); i != flakes.end(); ++i) { - FlakeRegistry::Entry entry; - entry.uri = i->value("uri", ""); - if (entry.uri.empty()) - throw Error("invalid flake registry entry"); + FlakeRegistry::Entry entry{FlakeRef(i->value("uri", ""))}; 
_flakeRegistry->entries.emplace(i.key(), entry); } } @@ -54,7 +52,7 @@ static void prim_flakeRegistry(EvalState & state, const Pos & pos, Value * * arg for (auto & entry : registry.entries) { auto vEntry = state.allocAttr(v, entry.first); state.mkAttrs(*vEntry, 2); - mkString(*state.allocAttr(*vEntry, state.symbols.create("uri")), entry.second.uri); + mkString(*state.allocAttr(*vEntry, state.symbols.create("uri")), entry.second.ref.to_string()); vEntry->attrs->sort(); } @@ -63,44 +61,53 @@ static void prim_flakeRegistry(EvalState & state, const Pos & pos, Value * * arg static RegisterPrimOp r1("__flakeRegistry", 0, prim_flakeRegistry); +static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef) +{ + if (auto refData = std::get_if(&flakeRef.data)) { + auto registry = state.getFlakeRegistry(); + auto i = registry.entries.find(refData->id); + if (i == registry.entries.end()) + throw Error("cannot find flake '%s' in the flake registry", refData->id); + auto newRef = FlakeRef(i->second.ref); + if (!newRef.isDirect()) + throw Error("found indirect flake URI '%s' in the flake registry", i->second.ref.to_string()); + return newRef; + } else + return flakeRef; +} + struct Flake { - std::string name; + FlakeId id; std::string description; Path path; std::set requires; Value * vProvides; // FIXME: gc + // commit hash + // date + // content hash }; -std::regex flakeRegex("^flake:([a-zA-Z][a-zA-Z0-9_-]*)(/[a-zA-Z][a-zA-Z0-9_.-]*)?$"); -std::regex githubRegex("^github:([a-zA-Z][a-zA-Z0-9_-]*)/([a-zA-Z][a-zA-Z0-9_-]*)(/([a-zA-Z][a-zA-Z0-9_-]*))?$"); - -static Path fetchFlake(EvalState & state, const std::string & flakeUri) +static Path fetchFlake(EvalState & state, const FlakeRef & flakeRef) { - std::smatch match; - - if (std::regex_match(flakeUri, match, flakeRegex)) { - auto flakeName = match[1]; - auto revOrRef = match[2]; - auto registry = state.getFlakeRegistry(); - auto i = registry.entries.find(flakeName); - if (i == registry.entries.end()) - throw Error("unknown flake '%s'", flakeName); - return fetchFlake(state, i->second.uri); - } - - else if (std::regex_match(flakeUri, match, githubRegex)) { - auto owner = match[1]; - auto repo = match[2]; - auto revOrRef = match[4].str(); - if (revOrRef.empty()) revOrRef = "master"; + assert(flakeRef.isDirect()); + if (auto refData = std::get_if(&flakeRef.data)) { // FIXME: require hash in pure mode. // FIXME: use regular /archive URLs instead? api.github.com // might have stricter rate limits. + + // FIXME: support passing auth tokens for private repos. + auto storePath = getDownloader()->downloadCached(state.store, - fmt("https://api.github.com/repos/%s/%s/tarball/%s", owner, repo, revOrRef), + fmt("https://api.github.com/repos/%s/%s/tarball/%s", + refData->owner, refData->repo, + refData->rev + ? refData->rev->to_string(Base16, false) + : refData->ref + ? *refData->ref + : "master"), true, "source"); // FIXME: extract revision hash from ETag. @@ -108,18 +115,18 @@ static Path fetchFlake(EvalState & state, const std::string & flakeUri) return storePath; } - else if (hasPrefix(flakeUri, "/") || hasPrefix(flakeUri, "git://")) { - auto gitInfo = exportGit(state.store, flakeUri, {}, "", "source"); + else if (auto refData = std::get_if(&flakeRef.data)) { + auto gitInfo = exportGit(state.store, refData->uri, refData->ref, + refData->rev ? 
refData->rev->to_string(Base16, false) : "", "source"); return gitInfo.storePath; } - else - throw Error("unsupported flake URI '%s'", flakeUri); + else abort(); } -static Flake getFlake(EvalState & state, const std::string & flakeUri) +static Flake getFlake(EvalState & state, const FlakeRef & flakeRef) { - auto flakePath = fetchFlake(state, flakeUri); + auto flakePath = fetchFlake(state, flakeRef); state.store->assertStorePath(flakePath); Flake flake; @@ -130,7 +137,7 @@ static Flake getFlake(EvalState & state, const std::string & flakeUri) state.forceAttrs(vInfo); if (auto name = vInfo.attrs->get(state.sName)) - flake.name = state.forceStringNoCtx(*(**name).value, *(**name).pos); + flake.id = state.forceStringNoCtx(*(**name).value, *(**name).pos); else throw Error("flake lacks attribute 'name'"); @@ -153,23 +160,31 @@ static Flake getFlake(EvalState & state, const std::string & flakeUri) return flake; } -static std::map resolveFlakes(EvalState & state, const StringSet & flakeUris) +/* Given a set of flake references, recursively fetch them and their + dependencies. */ +static std::map resolveFlakes(EvalState & state, const std::vector & flakeRefs) { - std::map done; - std::queue todo; - for (auto & i : flakeUris) todo.push(i); + std::map done; + std::queue todo; + for (auto & i : flakeRefs) todo.push(i); while (!todo.empty()) { - auto flakeUri = todo.front(); + auto flakeRef = todo.front(); todo.pop(); - if (done.count(flakeUri)) continue; - auto flake = getFlake(state, flakeUri); + if (auto refData = std::get_if(&flakeRef.data)) { + if (done.count(refData->id)) continue; // optimization + flakeRef = lookupFlake(state, flakeRef); + } + + auto flake = getFlake(state, flakeRef); + + if (done.count(flake.id)) continue; for (auto & require : flake.requires) todo.push(require); - done.emplace(flake.name, flake); + done.emplace(flake.id, flake); } return done; @@ -177,7 +192,7 @@ static std::map resolveFlakes(EvalState & state, const Strin static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) { - std::string flakeUri = state.forceStringNoCtx(*args[0], pos); + auto flakeUri = FlakeRef(state.forceStringNoCtx(*args[0], pos)); auto flakes = resolveFlakes(state, {flakeUri}); @@ -186,7 +201,7 @@ static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Va state.mkAttrs(*vResult, flakes.size()); for (auto & flake : flakes) { - auto vFlake = state.allocAttr(*vResult, flake.second.name); + auto vFlake = state.allocAttr(*vResult, flake.second.id); state.mkAttrs(*vFlake, 2); mkString(*state.allocAttr(*vFlake, state.sDescription), flake.second.description); auto vProvides = state.allocAttr(*vFlake, state.symbols.create("provides")); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh new file mode 100644 index 000000000..6be6e99d2 --- /dev/null +++ b/src/libexpr/primops/flake.hh @@ -0,0 +1,17 @@ +#include "types.hh" +#include "flakeref.hh" + +#include + +namespace nix { + +struct FlakeRegistry +{ + struct Entry + { + FlakeRef ref; + }; + std::map entries; +}; + +} diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc new file mode 100644 index 000000000..447b56822 --- /dev/null +++ b/src/libexpr/primops/flakeref.cc @@ -0,0 +1,139 @@ +#include "flakeref.hh" + +#include + +namespace nix { + +// A Git ref (i.e. branch or tag name). +const static std::string refRegex = "[a-zA-Z][a-zA-Z0-9_.-]*"; // FIXME: check + +// A Git revision (a SHA-1 commit hash). 
+const static std::string revRegexS = "[0-9a-fA-F]{40}"; +std::regex revRegex(revRegexS, std::regex::ECMAScript); + +// A Git ref or revision. +const static std::string revOrRefRegex = "(?:(" + revRegexS + ")|(" + refRegex + "))"; + +// A rev ("e72daba8250068216d79d2aeef40d4d95aff6666"), or a ref +// optionally followed by a rev (e.g. "master" or +// "master/e72daba8250068216d79d2aeef40d4d95aff6666"). +const static std::string refAndOrRevRegex = "(?:(" + revRegexS + ")|(?:(" + refRegex + ")(?:/(" + revRegexS + "))?))"; + +const static std::string flakeId = "[a-zA-Z][a-zA-Z0-9_-]*"; + +// GitHub references. +const static std::string ownerRegex = "[a-zA-Z][a-zA-Z0-9_-]*"; +const static std::string repoRegex = "[a-zA-Z][a-zA-Z0-9_-]*"; + +// URI stuff. +const static std::string schemeRegex = "(?:http|https|ssh|git|file)"; +const static std::string authorityRegex = "[a-zA-Z0-9._~-]*"; +const static std::string segmentRegex = "[a-zA-Z0-9._~-]+"; +const static std::string pathRegex = "/?" + segmentRegex + "(?:/" + segmentRegex + ")*"; +const static std::string paramRegex = "[a-z]+=[a-zA-Z0-9._-]*"; + +FlakeRef::FlakeRef(const std::string & uri) +{ + // FIXME: could combine this into one regex. + + static std::regex flakeRegex( + "(?:flake:)?(" + flakeId + ")(?:/(?:" + refAndOrRevRegex + "))?", + std::regex::ECMAScript); + + static std::regex githubRegex( + "github:(" + ownerRegex + ")/(" + repoRegex + ")(?:/" + revOrRefRegex + ")?", + std::regex::ECMAScript); + + static std::regex uriRegex( + "((" + schemeRegex + "):" + + "(?://(" + authorityRegex + "))?" + + "(" + pathRegex + "))" + + "(?:[?](" + paramRegex + "(?:&" + paramRegex + ")*))?", + std::regex::ECMAScript); + + static std::regex refRegex2(refRegex, std::regex::ECMAScript); + + std::cmatch match; + if (std::regex_match(uri.c_str(), match, flakeRegex)) { + IsFlakeId d; + d.id = match[1]; + if (match[2].matched) + d.rev = Hash(match[2], htSHA1); + else if (match[3].matched) { + d.ref = match[3]; + if (match[4].matched) + d.rev = Hash(match[4], htSHA1); + } + data = d; + } + + else if (std::regex_match(uri.c_str(), match, githubRegex)) { + IsGitHub d; + d.owner = match[1]; + d.repo = match[2]; + if (match[3].matched) + d.rev = Hash(match[3], htSHA1); + else if (match[4].matched) { + d.ref = match[4]; + } + data = d; + } + + else if (std::regex_match(uri.c_str(), match, uriRegex) && hasSuffix(match[4], ".git")) { + IsGit d; + d.uri = match[1]; + for (auto & param : tokenizeString(match[5], "&")) { + auto n = param.find('='); + assert(n != param.npos); + std::string name(param, 0, n); + std::string value(param, n + 1); + if (name == "rev") { + if (!std::regex_match(value, revRegex)) + throw Error("invalid Git revision '%s'", value); + d.rev = Hash(value, htSHA1); + } else if (name == "ref") { + if (!std::regex_match(value, refRegex2)) + throw Error("invalid Git ref '%s'", value); + d.ref = value; + } else + // FIXME: should probably pass through unknown parameters + throw Error("invalid Git flake reference parameter '%s', in '%s'", name, uri); + } + if (d.rev && !d.ref) + throw Error("flake URI '%s' lacks a Git ref", uri); + data = d; + } + + else + throw Error("'%s' is not a valid flake reference", uri); +} + +std::string FlakeRef::to_string() const +{ + if (auto refData = std::get_if(&data)) { + return + "flake:" + refData->id + + (refData->ref ? "/" + *refData->ref : "") + + (refData->rev ? 
"/" + refData->rev->to_string(Base16, false) : ""); + } + + else if (auto refData = std::get_if(&data)) { + assert(!refData->ref || !refData->rev); + return + "github:" + refData->owner + "/" + refData->repo + + (refData->ref ? "/" + *refData->ref : "") + + (refData->rev ? "/" + refData->rev->to_string(Base16, false) : ""); + } + + else if (auto refData = std::get_if(&data)) { + assert(refData->ref || !refData->rev); + return + refData->uri + + (refData->ref ? "?ref=" + *refData->ref : "") + + (refData->rev ? "&rev=" + refData->rev->to_string(Base16, false) : ""); + } + + else abort(); +} + +} diff --git a/src/libexpr/primops/flakeref.hh b/src/libexpr/primops/flakeref.hh new file mode 100644 index 000000000..8559317e0 --- /dev/null +++ b/src/libexpr/primops/flakeref.hh @@ -0,0 +1,158 @@ +#include "types.hh" +#include "hash.hh" + +#include + +namespace nix { + +/* Flake references are a URI-like syntax to specify a flake. + + Examples: + + * (/rev-or-ref(/rev)?)? + + Look up a flake by ID in the flake lock file or in the flake + registry. These must specify an actual location for the flake + using the formats listed below. Note that in pure evaluation + mode, the flake registry is empty. + + Optionally, the rev or ref from the dereferenced flake can be + overriden. For example, + + nixpkgs/19.09 + + uses the "19.09" branch of the nixpkgs' flake GitHub repository, + while + + nixpkgs/98a2a5b5370c1e2092d09cb38b9dcff6d98a109f + + uses the specified revision. For Git (rather than GitHub) + repositories, both the rev and ref must be given, e.g. + + nixpkgs/19.09/98a2a5b5370c1e2092d09cb38b9dcff6d98a109f + + * github:/(/)? + + A repository on GitHub. These differ from Git references in that + they're downloaded in a efficient way (via the tarball mechanism) + and that they support downloading a specific revision without + specifying a branch. is either a commit hash ("rev") + or a branch or tag name ("ref"). The default is: "master" if none + is specified. Note that in pure evaluation mode, a commit hash + must be used. + + Flakes fetched in this manner expose "rev" and "lastModified" + attributes, but not "revCount". + + Examples: + + github:edolstra/dwarffs + github:edolstra/dwarffs/unstable + github:edolstra/dwarffs/41c0c1bf292ea3ac3858ff393b49ca1123dbd553 + + * https:///.git(\?attr(&attr)*)? + ssh:///.git(\?attr(&attr)*)? + git:///.git(\?attr(&attr)*)? + file:///(\?attr(&attr)*)? + + where 'attr' is one of: + rev= + ref= + + A Git repository fetched through https. Note that the path must + end in ".git". The default for "ref" is "master". + + Examples: + + https://example.org/my/repo.git + https://example.org/my/repo.git?ref=release-1.2.3 + https://example.org/my/repo.git?rev=e72daba8250068216d79d2aeef40d4d95aff6666 + + * /path.git(\?attr(&attr)*)? + + Like file://path.git, but if no "ref" or "rev" is specified, the + (possibly dirty) working tree will be used. Using a working tree + is not allowed in pure evaluation mode. + + Examples: + + /path/to/my/repo + /path/to/my/repo?ref=develop + /path/to/my/repo?rev=e72daba8250068216d79d2aeef40d4d95aff6666 + + * https:///.tar.xz(?hash=) + file:///.tar.xz(?hash=) + + A flake distributed as a tarball. In pure evaluation mode, an SRI + hash is mandatory. It exposes a "lastModified" attribute, being + the newest file inside the tarball. 
+ + Example: + + https://releases.nixos.org/nixos/unstable/nixos-19.03pre167858.f2a1a4e93be/nixexprs.tar.xz + https://releases.nixos.org/nixos/unstable/nixos-19.03pre167858.f2a1a4e93be/nixexprs.tar.xz?hash=sha256-56bbc099995ea8581ead78f22832fee7dbcb0a0b6319293d8c2d0aef5379397c + + Note: currently, there can be only one flake per Git repository, and + it must be at top-level. In the future, we may want to add a field + (e.g. "dir=") to specify a subdirectory inside the repository. +*/ + +typedef std::string FlakeId; + +struct FlakeRef +{ + struct IsFlakeId + { + FlakeId id; + std::optional ref; + std::optional rev; + }; + + struct IsGitHub + { + std::string owner, repo; + std::optional ref; + std::optional rev; + }; + + struct IsGit + { + std::string uri; + std::optional ref; + std::optional rev; + }; + + // Git, Tarball + + std::variant data; + + // Parse a flake URI. + FlakeRef(const std::string & uri); + + /* Unify two flake references so that the resulting reference + combines the information from both. For example, + "nixpkgs/" and "github:NixOS/nixpkgs" unifies to + "nixpkgs/master". May throw an exception if the references are + incompatible (e.g. "nixpkgs/" and "nixpkgs/", + where hash1 != hash2). */ + FlakeRef(const FlakeRef & a, const FlakeRef & b); + + // FIXME: change to operator <<. + std::string to_string() const; + + /* Check whether this is a "direct" flake reference, that is, not + a flake ID, which requires a lookup in the flake registry. */ + bool isDirect() const + { + return !std::get_if(&data); + } + + /* Check whether this is an "immutable" flake reference, that is, + one that contains a commit hash or content hash. */ + bool isImmutable() const + { + abort(); // TODO + } +}; + +} diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 98cd90c64..9b36c3cbd 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -1,3 +1,4 @@ +#include "primops/flake.hh" #include "command.hh" #include "common-args.hh" #include "shared.hh" @@ -27,7 +28,7 @@ struct CmdFlakeList : StoreCommand, MixEvalArgs stopProgressBar(); for (auto & entry : registry.entries) { - std::cout << entry.first << " " << entry.second.uri << "\n"; + std::cout << entry.first << " " << entry.second.ref.to_string() << "\n"; } } }; From ba05f29838b3bafe28c3ea491be711229298cb1b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 12 Feb 2019 20:35:03 +0100 Subject: [PATCH 013/634] nix: Enable pure mode by default We want to encourage a brave new world of hermetic evaluation for source-level reproducibility, so flakes should not poke around in the filesystem outside of their explicit dependencies. Note that the default installation source remains impure in that it can refer to mutable flakes, so "nix build nixpkgs.hello" still works (and fetches the latest nixpkgs, unless it has been pinned by the user). A problem with pure evaluation is that builtins.currentSystem is unavailable. For the moment, I've hard-coded "x86_64-linux" in the nixpkgs flake. Eventually, "system" should be a flake function argument. 
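To make the mechanism concrete, here is a Nix-language sketch of what the
registry-driven default installation source amounts to. The registry contents
and the presence of a "hello" package are illustrative assumptions; the
mapAttrs expression is the one this patch puts into src/nix/installables.cc:

    # Assume the flake registry value contains:
    #   { nixpkgs = { uri = "github:NixOS/nixpkgs"; }; }
    builtins.mapAttrs
      (flakeName: flakeInfo:
        (getFlake flakeInfo.uri).${flakeName}.provides.packages or {})
      { nixpkgs = { uri = "github:NixOS/nixpkgs"; }; }
    # => { nixpkgs = { hello = <derivation>; ... }; }
    # hence "nix build nixpkgs.hello" still resolves, but through a mutable flake.
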
--- corepkgs/default-installation-source.nix | 3 -- corepkgs/local.mk | 3 +- src/libexpr/eval.hh | 2 + src/libexpr/primops/flake.cc | 48 ++++++++++++------------ src/nix/installables.cc | 11 +++++- src/nix/main.cc | 1 + 6 files changed, 38 insertions(+), 30 deletions(-) delete mode 100644 corepkgs/default-installation-source.nix diff --git a/corepkgs/default-installation-source.nix b/corepkgs/default-installation-source.nix deleted file mode 100644 index 71ba04452..000000000 --- a/corepkgs/default-installation-source.nix +++ /dev/null @@ -1,3 +0,0 @@ -builtins.mapAttrs (flakeName: flakeInfo: - (getFlake flakeInfo.uri).${flakeName}.provides.packages or {}) - builtins.flakeRegistry diff --git a/corepkgs/local.mk b/corepkgs/local.mk index 41aaec63b..67306e50d 100644 --- a/corepkgs/local.mk +++ b/corepkgs/local.mk @@ -3,8 +3,7 @@ corepkgs_FILES = \ unpack-channel.nix \ derivation.nix \ fetchurl.nix \ - imported-drv-to-derivation.nix \ - default-installation-source.nix + imported-drv-to-derivation.nix $(foreach file,config.nix $(corepkgs_FILES),$(eval $(call install-data-in,$(d)/$(file),$(datadir)/nix/corepkgs))) diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index 35c01b97a..27c6c3da8 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -318,6 +318,8 @@ public: const FlakeRegistry & getFlakeRegistry(); + Value * makeFlakeRegistryValue(); + private: std::unique_ptr _flakeRegistry; std::once_flag _flakeRegistryInit; diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 5e92b1da3..4d027558d 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -16,50 +16,49 @@ const FlakeRegistry & EvalState::getFlakeRegistry() { _flakeRegistry = std::make_unique(); - if (!evalSettings.pureEval) { - #if 0 - auto registryUri = "file:///home/eelco/Dev/gists/nix-flakes/registry.json"; + auto registryUri = "file:///home/eelco/Dev/gists/nix-flakes/registry.json"; - auto registryFile = getDownloader()->download(DownloadRequest(registryUri)); + auto registryFile = getDownloader()->download(DownloadRequest(registryUri)); #endif - auto registryFile = readFile(settings.nixDataDir + "/nix/flake-registry.json"); + auto registryFile = readFile(settings.nixDataDir + "/nix/flake-registry.json"); - auto json = nlohmann::json::parse(registryFile); + auto json = nlohmann::json::parse(registryFile); - auto version = json.value("version", 0); - if (version != 1) - throw Error("flake registry '%s' has unsupported version %d", registryFile, version); + auto version = json.value("version", 0); + if (version != 1) + throw Error("flake registry '%s' has unsupported version %d", registryFile, version); - auto flakes = json["flakes"]; - for (auto i = flakes.begin(); i != flakes.end(); ++i) { - FlakeRegistry::Entry entry{FlakeRef(i->value("uri", ""))}; - _flakeRegistry->entries.emplace(i.key(), entry); - } + auto flakes = json["flakes"]; + for (auto i = flakes.begin(); i != flakes.end(); ++i) { + FlakeRegistry::Entry entry{FlakeRef(i->value("uri", ""))}; + _flakeRegistry->entries.emplace(i.key(), entry); } }); return *_flakeRegistry; } -static void prim_flakeRegistry(EvalState & state, const Pos & pos, Value * * args, Value & v) +Value * EvalState::makeFlakeRegistryValue() { - auto registry = state.getFlakeRegistry(); + auto v = allocValue(); - state.mkAttrs(v, registry.entries.size()); + auto registry = getFlakeRegistry(); + + mkAttrs(*v, registry.entries.size()); for (auto & entry : registry.entries) { - auto vEntry = state.allocAttr(v, entry.first); - 
state.mkAttrs(*vEntry, 2); - mkString(*state.allocAttr(*vEntry, state.symbols.create("uri")), entry.second.ref.to_string()); + auto vEntry = allocAttr(*v, entry.first); + mkAttrs(*vEntry, 2); + mkString(*allocAttr(*vEntry, symbols.create("uri")), entry.second.ref.to_string()); vEntry->attrs->sort(); } - v.attrs->sort(); -} + v->attrs->sort(); -static RegisterPrimOp r1("__flakeRegistry", 0, prim_flakeRegistry); + return v; +} static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef) { @@ -129,6 +128,9 @@ static Flake getFlake(EvalState & state, const FlakeRef & flakeRef) auto flakePath = fetchFlake(state, flakeRef); state.store->assertStorePath(flakePath); + if (state.allowedPaths) + state.allowedPaths->insert(flakePath); + Flake flake; Value vInfo; diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 9b7b96c25..faad057a7 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -30,8 +30,15 @@ Value * SourceExprCommand::getSourceExpr(EvalState & state) if (file != "") state.evalFile(lookupFileArg(state, file), *vSourceExpr); - else - state.evalFile(lookupFileArg(state, ""), *vSourceExpr); + else { + auto fun = state.parseExprFromString( + "builtins.mapAttrs (flakeName: flakeInfo:" + " (getFlake flakeInfo.uri).${flakeName}.provides.packages or {})", "/"); + auto vFun = state.allocValue(); + state.eval(fun, *vFun); + auto vRegistry = state.makeFlakeRegistryValue(); + mkApp(*vSourceExpr, *vFun, *vRegistry); + } return vSourceExpr; } diff --git a/src/nix/main.cc b/src/nix/main.cc index 4b909736d..01b0866f2 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -97,6 +97,7 @@ void mainWrapped(int argc, char * * argv) verbosity = lvlError; settings.verboseBuild = false; + evalSettings.pureEval = true; NixArgs args; From 272b58220d17bc862f646dbc2cb38eea126001c0 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 12 Feb 2019 21:05:44 +0100 Subject: [PATCH 014/634] Enforce use of immutable flakes in pure mode ... plus a temporary hack to allow impure flakes at top-level for the default installation source. --- src/libexpr/primops/fetchGit.cc | 6 +++--- src/libexpr/primops/fetchMercurial.cc | 6 +++--- src/libexpr/primops/flake.cc | 30 ++++++++++++++++++++------- src/libexpr/primops/flakeref.cc | 14 +++++++++++++ src/libexpr/primops/flakeref.hh | 5 +---- src/nix/installables.cc | 4 +++- 6 files changed, 46 insertions(+), 19 deletions(-) diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index 62e9dfc0e..bbf13c87b 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -22,9 +22,6 @@ GitInfo exportGit(ref store, const std::string & uri, std::optional ref, std::string rev, const std::string & name) { - if (evalSettings.pureEval && rev == "") - throw Error("in pure evaluation mode, 'fetchGit' requires a Git revision"); - if (!ref && rev == "" && hasPrefix(uri, "/") && pathExists(uri + "/.git")) { bool clean = true; @@ -218,6 +215,9 @@ static void prim_fetchGit(EvalState & state, const Pos & pos, Value * * args, Va // whitelist. Ah well. 
state.checkURI(url); + if (evalSettings.pureEval && rev == "") + throw Error("in pure evaluation mode, 'fetchGit' requires a Git revision"); + auto gitInfo = exportGit(state.store, url, ref, rev, name); state.mkAttrs(v, 8); diff --git a/src/libexpr/primops/fetchMercurial.cc b/src/libexpr/primops/fetchMercurial.cc index 66f49f374..cfe1bd871 100644 --- a/src/libexpr/primops/fetchMercurial.cc +++ b/src/libexpr/primops/fetchMercurial.cc @@ -27,9 +27,6 @@ std::regex commitHashRegex("^[0-9a-fA-F]{40}$"); HgInfo exportMercurial(ref store, const std::string & uri, std::string rev, const std::string & name) { - if (evalSettings.pureEval && rev == "") - throw Error("in pure evaluation mode, 'fetchMercurial' requires a Mercurial revision"); - if (rev == "" && hasPrefix(uri, "/") && pathExists(uri + "/.hg")) { bool clean = runProgram("hg", true, { "status", "-R", uri, "--modified", "--added", "--removed" }) == ""; @@ -203,6 +200,9 @@ static void prim_fetchMercurial(EvalState & state, const Pos & pos, Value * * ar // whitelist. Ah well. state.checkURI(url); + if (evalSettings.pureEval && rev == "") + throw Error("in pure evaluation mode, 'fetchMercurial' requires a Mercurial revision"); + auto hgInfo = exportMercurial(state.store, url, rev, name); state.mkAttrs(v, 8); diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 4d027558d..1e70ccbd6 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -162,16 +162,17 @@ static Flake getFlake(EvalState & state, const FlakeRef & flakeRef) return flake; } -/* Given a set of flake references, recursively fetch them and their +/* Given a flake reference, recursively fetch it and its dependencies. */ -static std::map resolveFlakes(EvalState & state, const std::vector & flakeRefs) +static std::map resolveFlake(EvalState & state, + const FlakeRef & topRef, bool impureTopRef) { std::map done; - std::queue todo; - for (auto & i : flakeRefs) todo.push(i); + std::queue> todo; + todo.push({topRef, impureTopRef}); while (!todo.empty()) { - auto flakeRef = todo.front(); + auto [flakeRef, impureRef] = todo.front(); todo.pop(); if (auto refData = std::get_if(&flakeRef.data)) { @@ -179,12 +180,15 @@ static std::map resolveFlakes(EvalState & state, const std::vect flakeRef = lookupFlake(state, flakeRef); } + if (evalSettings.pureEval && !flakeRef.isImmutable() && !impureRef) + throw Error("mutable flake '%s' is not allowed in pure mode; use --no-pure-eval to disable", flakeRef.to_string()); + auto flake = getFlake(state, flakeRef); if (done.count(flake.id)) continue; for (auto & require : flake.requires) - todo.push(require); + todo.push({require, false}); done.emplace(flake.id, flake); } @@ -194,9 +198,19 @@ static std::map resolveFlakes(EvalState & state, const std::vect static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) { - auto flakeUri = FlakeRef(state.forceStringNoCtx(*args[0], pos)); + auto flakeUri = state.forceStringNoCtx(*args[0], pos); - auto flakes = resolveFlakes(state, {flakeUri}); + // FIXME: temporary hack to make the default installation source + // work. 
+ bool impure = false; + if (hasPrefix(flakeUri, "impure:")) { + flakeUri = std::string(flakeUri, 7); + impure = true; + } + + auto flakeRef = FlakeRef(flakeUri); + + auto flakes = resolveFlake(state, flakeUri, impure); auto vResult = state.allocValue(); diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index 447b56822..639313f21 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -136,4 +136,18 @@ std::string FlakeRef::to_string() const else abort(); } +bool FlakeRef::isImmutable() const +{ + if (auto refData = std::get_if(&data)) + return (bool) refData->rev; + + else if (auto refData = std::get_if(&data)) + return (bool) refData->rev; + + else if (auto refData = std::get_if(&data)) + return (bool) refData->rev; + + else abort(); +} + } diff --git a/src/libexpr/primops/flakeref.hh b/src/libexpr/primops/flakeref.hh index 8559317e0..ad0cf8630 100644 --- a/src/libexpr/primops/flakeref.hh +++ b/src/libexpr/primops/flakeref.hh @@ -149,10 +149,7 @@ struct FlakeRef /* Check whether this is an "immutable" flake reference, that is, one that contains a commit hash or content hash. */ - bool isImmutable() const - { - abort(); // TODO - } + bool isImmutable() const; }; } diff --git a/src/nix/installables.cc b/src/nix/installables.cc index faad057a7..b4584f168 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -31,9 +31,11 @@ Value * SourceExprCommand::getSourceExpr(EvalState & state) if (file != "") state.evalFile(lookupFileArg(state, file), *vSourceExpr); else { + // FIXME: remove "impure" hack, call some non-user-accessible + // variant of getFlake instead. auto fun = state.parseExprFromString( "builtins.mapAttrs (flakeName: flakeInfo:" - " (getFlake flakeInfo.uri).${flakeName}.provides.packages or {})", "/"); + " (getFlake (\"impure:\" + flakeInfo.uri)).${flakeName}.provides.packages or {})", "/"); auto vFun = state.allocValue(); state.eval(fun, *vFun); auto vRegistry = state.makeFlakeRegistryValue(); From beab05851bfa895fe538f15f8bbb2da3a20db638 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 12 Feb 2019 21:55:43 +0100 Subject: [PATCH 015/634] nix: Add --flake flag This allows using an arbitrary "provides" attribute from the specified flake. For example: nix build --flake nixpkgs packages.hello (Maybe provides.packages should be used for consistency...) 
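For orientation, a hypothetical flake.nix shaped the way getFlake() currently
checks for: a mandatory 'name' and 'provides', plus optional 'description' and
'requires'. The calling convention of 'provides' (the 'deps:' argument) and the
body of the package are not shown anywhere in these patches, so they are
assumptions for illustration only:

    {
      name = "hello";
      description = "A flake exposing GNU Hello";
      requires = [ "nixpkgs" ];
      provides = deps: {
        packages.hello = deps.nixpkgs.provides.packages.hello;
      };
    }

With something like this, "nix build --flake <flake-uri> packages.hello" picks
the 'packages.hello' attribute out of the flake's 'provides'.
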
--- src/libexpr/eval.hh | 2 -- src/libexpr/primops/flake.cc | 52 ++++++++++++++++++++++++------------ src/libexpr/primops/flake.hh | 7 +++++ src/nix/command.hh | 3 ++- src/nix/installables.cc | 26 ++++++++++++++---- 5 files changed, 65 insertions(+), 25 deletions(-) diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index 27c6c3da8..35c01b97a 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -318,8 +318,6 @@ public: const FlakeRegistry & getFlakeRegistry(); - Value * makeFlakeRegistryValue(); - private: std::unique_ptr _flakeRegistry; std::once_flag _flakeRegistryInit; diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 1e70ccbd6..3bd62a50b 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -40,18 +40,18 @@ const FlakeRegistry & EvalState::getFlakeRegistry() return *_flakeRegistry; } -Value * EvalState::makeFlakeRegistryValue() +Value * makeFlakeRegistryValue(EvalState & state) { - auto v = allocValue(); + auto v = state.allocValue(); - auto registry = getFlakeRegistry(); + auto registry = state.getFlakeRegistry(); - mkAttrs(*v, registry.entries.size()); + state.mkAttrs(*v, registry.entries.size()); for (auto & entry : registry.entries) { - auto vEntry = allocAttr(*v, entry.first); - mkAttrs(*vEntry, 2); - mkString(*allocAttr(*vEntry, symbols.create("uri")), entry.second.ref.to_string()); + auto vEntry = state.allocAttr(*v, entry.first); + state.mkAttrs(*vEntry, 2); + mkString(*state.allocAttr(*vEntry, state.symbols.create("uri")), entry.second.ref.to_string()); vEntry->attrs->sort(); } @@ -163,16 +163,19 @@ static Flake getFlake(EvalState & state, const FlakeRef & flakeRef) } /* Given a flake reference, recursively fetch it and its - dependencies. */ -static std::map resolveFlake(EvalState & state, + dependencies. + FIXME: this should return a graph of flakes. +*/ +static std::tuple> resolveFlake(EvalState & state, const FlakeRef & topRef, bool impureTopRef) { std::map done; std::queue> todo; - todo.push({topRef, impureTopRef}); + std::optional topFlakeId; /// FIXME: ambiguous + todo.push({topRef, true}); while (!todo.empty()) { - auto [flakeRef, impureRef] = todo.front(); + auto [flakeRef, toplevel] = todo.front(); todo.pop(); if (auto refData = std::get_if(&flakeRef.data)) { @@ -180,26 +183,27 @@ static std::map resolveFlake(EvalState & state, flakeRef = lookupFlake(state, flakeRef); } - if (evalSettings.pureEval && !flakeRef.isImmutable() && !impureRef) + if (evalSettings.pureEval && !flakeRef.isImmutable() && (!toplevel || !impureTopRef)) throw Error("mutable flake '%s' is not allowed in pure mode; use --no-pure-eval to disable", flakeRef.to_string()); auto flake = getFlake(state, flakeRef); if (done.count(flake.id)) continue; + if (toplevel) topFlakeId = flake.id; + for (auto & require : flake.requires) todo.push({require, false}); done.emplace(flake.id, flake); } - return done; + assert(topFlakeId); + return {*topFlakeId, done}; } -static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) +Value * makeFlakeValue(EvalState & state, std::string flakeUri, Value & v) { - auto flakeUri = state.forceStringNoCtx(*args[0], pos); - // FIXME: temporary hack to make the default installation source // work. 
bool impure = false; @@ -210,14 +214,20 @@ static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Va auto flakeRef = FlakeRef(flakeUri); - auto flakes = resolveFlake(state, flakeUri, impure); + auto [topFlakeId, flakes] = resolveFlake(state, flakeUri, impure); + + // FIXME: we should call each flake with only its dependencies + // (rather than the closure of the top-level flake). auto vResult = state.allocValue(); state.mkAttrs(*vResult, flakes.size()); + Value * vTop = 0; + for (auto & flake : flakes) { auto vFlake = state.allocAttr(*vResult, flake.second.id); + if (topFlakeId == flake.second.id) vTop = vFlake; state.mkAttrs(*vFlake, 2); mkString(*state.allocAttr(*vFlake, state.sDescription), flake.second.description); auto vProvides = state.allocAttr(*vFlake, state.symbols.create("provides")); @@ -228,6 +238,14 @@ static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Va vResult->attrs->sort(); v = *vResult; + + assert(vTop); + return vTop; +} + +static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + makeFlakeValue(state, state.forceStringNoCtx(*args[0], pos), v); } static RegisterPrimOp r2("getFlake", 1, prim_getFlake); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 6be6e99d2..e504dc196 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -5,6 +5,9 @@ namespace nix { +struct Value; +class EvalState; + struct FlakeRegistry { struct Entry @@ -14,4 +17,8 @@ struct FlakeRegistry std::map entries; }; +Value * makeFlakeRegistryValue(EvalState & state); + +Value * makeFlakeValue(EvalState & state, std::string flakeUri, Value & v); + } diff --git a/src/nix/command.hh b/src/nix/command.hh index 04183c7ed..a08347945 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -53,7 +53,8 @@ struct Installable struct SourceExprCommand : virtual Args, StoreCommand, MixEvalArgs { - Path file; + std::optional file; + std::optional flakeUri; SourceExprCommand(); diff --git a/src/nix/installables.cc b/src/nix/installables.cc index b4584f168..0453c72c2 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -7,6 +7,7 @@ #include "get-drvs.hh" #include "store-api.hh" #include "shared.hh" +#include "primops/flake.hh" #include @@ -18,8 +19,15 @@ SourceExprCommand::SourceExprCommand() .shortName('f') .longName("file") .label("file") - .description("evaluate FILE rather than the default") + .description("evaluate FILE rather than use the default installation source") .dest(&file); + + mkFlag() + .shortName('F') + .longName("flake") + .label("flake") + .description("evaluate FLAKE rather than use the default installation source") + .dest(&flakeUri); } Value * SourceExprCommand::getSourceExpr(EvalState & state) @@ -28,9 +36,17 @@ Value * SourceExprCommand::getSourceExpr(EvalState & state) vSourceExpr = state.allocValue(); - if (file != "") - state.evalFile(lookupFileArg(state, file), *vSourceExpr); - else { + if (file && flakeUri) + throw Error("cannot use both --file and --flake"); + + if (file) + state.evalFile(lookupFileArg(state, *file), *vSourceExpr); + else if (flakeUri) { + // FIXME: handle flakeUri being a relative path + auto vTemp = state.allocValue(); + auto vFlake = *makeFlakeValue(state, "impure:" + *flakeUri, *vTemp); + *vSourceExpr = *((*vFlake.attrs->get(state.symbols.create("provides")))->value); + } else { // FIXME: remove "impure" hack, call some non-user-accessible // variant of getFlake instead. 
auto fun = state.parseExprFromString( @@ -38,7 +54,7 @@ Value * SourceExprCommand::getSourceExpr(EvalState & state) " (getFlake (\"impure:\" + flakeInfo.uri)).${flakeName}.provides.packages or {})", "/"); auto vFun = state.allocValue(); state.eval(fun, *vFun); - auto vRegistry = state.makeFlakeRegistryValue(); + auto vRegistry = makeFlakeRegistryValue(state); mkApp(*vSourceExpr, *vFun, *vRegistry); } From e38ec77de8077658d2f75fd6ff8b0f0f06babda9 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 12 Feb 2019 22:06:19 +0100 Subject: [PATCH 016/634] Interpret all file:// URIs as Git repositories --- src/libexpr/primops/flakeref.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index 639313f21..a2700f102 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -79,7 +79,9 @@ FlakeRef::FlakeRef(const std::string & uri) data = d; } - else if (std::regex_match(uri.c_str(), match, uriRegex) && hasSuffix(match[4], ".git")) { + else if (std::regex_match(uri.c_str(), match, uriRegex) + && (match[2] == "file" || hasSuffix(match[4], ".git"))) + { IsGit d; d.uri = match[1]; for (auto & param : tokenizeString(match[5], "&")) { From 6e9182fbc2544e5252366321da1c9406571d01e4 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 12 Feb 2019 22:43:22 +0100 Subject: [PATCH 017/634] Add basic flake lock file support --- src/libexpr/primops/flake.cc | 117 +++++++++++++++++++++++------------ 1 file changed, 76 insertions(+), 41 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 3bd62a50b..a788e935e 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -10,31 +10,40 @@ namespace nix { +/* Read the registry or a lock file. (Currently they have an identical + format. 
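+   As a sketch, inferred from the parsing below (the "nixpkgs" entry is purely
+   illustrative), that format is:
+
+     {
+       "version": 1,
+       "flakes": {
+         "nixpkgs": { "uri": "github:NixOS/nixpkgs" }
+       }
+     }
+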
*/ +static std::unique_ptr readRegistry(const Path & path) +{ + auto registry = std::make_unique(); + + auto json = nlohmann::json::parse(readFile(path)); + + auto version = json.value("version", 0); + if (version != 1) + throw Error("flake registry '%s' has unsupported version %d", path, version); + + auto flakes = json["flakes"]; + for (auto i = flakes.begin(); i != flakes.end(); ++i) { + FlakeRegistry::Entry entry{FlakeRef(i->value("uri", ""))}; + registry->entries.emplace(i.key(), entry); + } + + return registry; +} + const FlakeRegistry & EvalState::getFlakeRegistry() { std::call_once(_flakeRegistryInit, [&]() { - _flakeRegistry = std::make_unique(); - #if 0 auto registryUri = "file:///home/eelco/Dev/gists/nix-flakes/registry.json"; auto registryFile = getDownloader()->download(DownloadRequest(registryUri)); #endif - auto registryFile = readFile(settings.nixDataDir + "/nix/flake-registry.json"); + auto registryFile = settings.nixDataDir + "/nix/flake-registry.json"; - auto json = nlohmann::json::parse(registryFile); - - auto version = json.value("version", 0); - if (version != 1) - throw Error("flake registry '%s' has unsupported version %d", registryFile, version); - - auto flakes = json["flakes"]; - for (auto i = flakes.begin(); i != flakes.end(); ++i) { - FlakeRegistry::Entry entry{FlakeRef(i->value("uri", ""))}; - _flakeRegistry->entries.emplace(i.key(), entry); - } + _flakeRegistry = readRegistry(registryFile); }); return *_flakeRegistry; @@ -60,33 +69,24 @@ Value * makeFlakeRegistryValue(EvalState & state) return v; } -static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef) +static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, + std::vector registries) { if (auto refData = std::get_if(&flakeRef.data)) { - auto registry = state.getFlakeRegistry(); - auto i = registry.entries.find(refData->id); - if (i == registry.entries.end()) - throw Error("cannot find flake '%s' in the flake registry", refData->id); - auto newRef = FlakeRef(i->second.ref); - if (!newRef.isDirect()) - throw Error("found indirect flake URI '%s' in the flake registry", i->second.ref.to_string()); - return newRef; + for (auto registry : registries) { + auto i = registry->entries.find(refData->id); + if (i != registry->entries.end()) { + auto newRef = FlakeRef(i->second.ref); + if (!newRef.isDirect()) + throw Error("found indirect flake URI '%s' in the flake registry", i->second.ref.to_string()); + return newRef; + } + } + throw Error("cannot find flake '%s' in the flake registry or in the flake lock file", refData->id); } else return flakeRef; } -struct Flake -{ - FlakeId id; - std::string description; - Path path; - std::set requires; - Value * vProvides; // FIXME: gc - // commit hash - // date - // content hash -}; - static Path fetchFlake(EvalState & state, const FlakeRef & flakeRef) { assert(flakeRef.isDirect()); @@ -123,6 +123,19 @@ static Path fetchFlake(EvalState & state, const FlakeRef & flakeRef) else abort(); } +struct Flake +{ + FlakeId id; + std::string description; + Path path; + std::vector requires; + std::unique_ptr lockFile; + Value * vProvides; // FIXME: gc + // commit hash + // date + // content hash +}; + static Flake getFlake(EvalState & state, const FlakeRef & flakeRef) { auto flakePath = fetchFlake(state, flakeRef); @@ -134,7 +147,7 @@ static Flake getFlake(EvalState & state, const FlakeRef & flakeRef) Flake flake; Value vInfo; - state.evalFile(flakePath + "/flake.nix", vInfo); + state.evalFile(flakePath + "/flake.nix", vInfo); // FIXME: symlink attack 
state.forceAttrs(vInfo); @@ -149,8 +162,8 @@ static Flake getFlake(EvalState & state, const FlakeRef & flakeRef) if (auto requires = vInfo.attrs->get(state.symbols.create("requires"))) { state.forceList(*(**requires).value, *(**requires).pos); for (unsigned int n = 0; n < (**requires).value->listSize(); ++n) - flake.requires.insert(state.forceStringNoCtx( - *(**requires).value->listElems()[n], *(**requires).pos)); + flake.requires.push_back(FlakeRef(state.forceStringNoCtx( + *(**requires).value->listElems()[n], *(**requires).pos))); } if (auto provides = vInfo.attrs->get(state.symbols.create("provides"))) { @@ -159,6 +172,16 @@ static Flake getFlake(EvalState & state, const FlakeRef & flakeRef) } else throw Error("flake lacks attribute 'provides'"); + auto lockFile = flakePath + "/flake.lock"; // FIXME: symlink attack + + if (pathExists(lockFile)) { + flake.lockFile = readRegistry(lockFile); + for (auto & entry : flake.lockFile->entries) + if (!entry.second.ref.isImmutable()) + throw Error("flake lock file '%s' contains mutable entry '%s'", + lockFile, entry.second.ref.to_string()); + } + return flake; } @@ -174,13 +197,19 @@ static std::tuple> resolveFlake(EvalState & st std::optional topFlakeId; /// FIXME: ambiguous todo.push({topRef, true}); + std::vector registries; + FlakeRegistry localRegistry; + registries.push_back(&localRegistry); + if (!evalSettings.pureEval) + registries.push_back(&state.getFlakeRegistry()); + while (!todo.empty()) { auto [flakeRef, toplevel] = todo.front(); todo.pop(); if (auto refData = std::get_if(&flakeRef.data)) { if (done.count(refData->id)) continue; // optimization - flakeRef = lookupFlake(state, flakeRef); + flakeRef = lookupFlake(state, flakeRef, registries); } if (evalSettings.pureEval && !flakeRef.isImmutable() && (!toplevel || !impureTopRef)) @@ -195,11 +224,17 @@ static std::tuple> resolveFlake(EvalState & st for (auto & require : flake.requires) todo.push({require, false}); - done.emplace(flake.id, flake); + if (flake.lockFile) + for (auto & entry : flake.lockFile->entries) { + if (localRegistry.entries.count(entry.first)) continue; + localRegistry.entries.emplace(entry.first, entry.second); + } + + done.emplace(flake.id, std::move(flake)); } assert(topFlakeId); - return {*topFlakeId, done}; + return {*topFlakeId, std::move(done)}; } Value * makeFlakeValue(EvalState & state, std::string flakeUri, Value & v) From 529add316c5356a8060c35f987643b7bf5c796dc Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 25 Feb 2019 23:20:50 +0800 Subject: [PATCH 018/634] downloadCached: Return ETag This allows fetchFlake() to return the Git revision of a GitHub archive. 
--- src/libexpr/common-eval-args.cc | 2 +- src/libexpr/parser.y | 2 +- src/libexpr/primops.cc | 2 +- src/libexpr/primops/flake.cc | 49 +++++++++++++++++++++++---------- src/libstore/download.cc | 17 +++++++++--- src/libstore/download.hh | 12 ++++++-- src/nix-channel/nix-channel.cc | 6 ++-- 7 files changed, 64 insertions(+), 26 deletions(-) diff --git a/src/libexpr/common-eval-args.cc b/src/libexpr/common-eval-args.cc index 3e0c78f28..37c74a94b 100644 --- a/src/libexpr/common-eval-args.cc +++ b/src/libexpr/common-eval-args.cc @@ -46,7 +46,7 @@ Bindings * MixEvalArgs::getAutoArgs(EvalState & state) Path lookupFileArg(EvalState & state, string s) { if (isUri(s)) - return getDownloader()->downloadCached(state.store, s, true); + return getDownloader()->downloadCached(state.store, s, true).path; else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') { Path p = s.substr(1, s.size() - 2); return state.findFile(p); diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y index cbd576d7d..0f1ac05d6 100644 --- a/src/libexpr/parser.y +++ b/src/libexpr/parser.y @@ -657,7 +657,7 @@ std::pair EvalState::resolveSearchPathElem(const SearchPathEl if (isUri(elem.second)) { try { - res = { true, getDownloader()->downloadCached(store, elem.second, true) }; + res = { true, getDownloader()->downloadCached(store, elem.second, true).path }; } catch (DownloadError & e) { printError(format("warning: Nix search path entry '%1%' cannot be downloaded, ignoring") % elem.second); res = { false, "" }; diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index f787ad96b..7e3dd23ed 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -2112,7 +2112,7 @@ void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v, if (evalSettings.pureEval && !expectedHash) throw Error("in pure evaluation mode, '%s' requires a 'sha256' argument", who); - Path res = getDownloader()->downloadCached(state.store, url, unpack, name, expectedHash); + Path res = getDownloader()->downloadCached(state.store, url, unpack, name, expectedHash).path; if (state.allowedPaths) state.allowedPaths->insert(res); diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index a788e935e..f9a1a3d6a 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -87,7 +87,13 @@ static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, return flakeRef; } -static Path fetchFlake(EvalState & state, const FlakeRef & flakeRef) +struct FlakeSourceInfo +{ + Path storePath; + std::optional rev; +}; + +static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef) { assert(flakeRef.isDirect()); @@ -99,25 +105,36 @@ static Path fetchFlake(EvalState & state, const FlakeRef & flakeRef) // FIXME: support passing auth tokens for private repos. - auto storePath = getDownloader()->downloadCached(state.store, - fmt("https://api.github.com/repos/%s/%s/tarball/%s", - refData->owner, refData->repo, - refData->rev - ? refData->rev->to_string(Base16, false) - : refData->ref - ? *refData->ref - : "master"), - true, "source"); + auto url = fmt("https://api.github.com/repos/%s/%s/tarball/%s", + refData->owner, refData->repo, + refData->rev + ? refData->rev->to_string(Base16, false) + : refData->ref + ? *refData->ref + : "master"); - // FIXME: extract revision hash from ETag. 
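+        // The checks below rely on GitHub returning the commit hash as the
+        // tarball's ETag: a 40-character hex revision wrapped in double quotes
+        // (hence the size-42 test), which is then recorded as FlakeSourceInfo::rev.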
+ auto result = getDownloader()->downloadCached(state.store, url, true, "source"); - return storePath; + if (!result.etag) + throw Error("did not receive an ETag header from '%s'", url); + + if (result.etag->size() != 42 || (*result.etag)[0] != '"' || (*result.etag)[41] != '"') + throw Error("ETag header '%s' from '%s' is not a Git revision", *result.etag, url); + + FlakeSourceInfo info; + info.storePath = result.path; + info.rev = Hash(std::string(*result.etag, 1, result.etag->size() - 2), htSHA1); + + return info; } else if (auto refData = std::get_if(&flakeRef.data)) { auto gitInfo = exportGit(state.store, refData->uri, refData->ref, refData->rev ? refData->rev->to_string(Base16, false) : "", "source"); - return gitInfo.storePath; + FlakeSourceInfo info; + info.storePath = gitInfo.storePath; + info.rev = Hash(gitInfo.rev, htSHA1); + return info; } else abort(); @@ -138,7 +155,11 @@ struct Flake static Flake getFlake(EvalState & state, const FlakeRef & flakeRef) { - auto flakePath = fetchFlake(state, flakeRef); + auto sourceInfo = fetchFlake(state, flakeRef); + debug("got flake source '%s' with revision %s", + sourceInfo.storePath, sourceInfo.rev.value_or(Hash(htSHA1)).to_string(Base16, false)); + + auto flakePath = sourceInfo.storePath; state.store->assertStorePath(flakePath); if (state.allowedPaths) diff --git a/src/libstore/download.cc b/src/libstore/download.cc index 467f570bb..360d48b09 100644 --- a/src/libstore/download.cc +++ b/src/libstore/download.cc @@ -771,7 +771,7 @@ void Downloader::download(DownloadRequest && request, Sink & sink) } } -Path Downloader::downloadCached(ref store, const string & url_, bool unpack, string name, const Hash & expectedHash, string * effectiveUrl, int ttl) +CachedDownloadResult Downloader::downloadCached(ref store, const string & url_, bool unpack, string name, const Hash & expectedHash, string * effectiveUrl, int ttl) { auto url = resolveUri(url_); @@ -783,8 +783,11 @@ Path Downloader::downloadCached(ref store, const string & url_, bool unpa Path expectedStorePath; if (expectedHash) { expectedStorePath = store->makeFixedOutputPath(unpack, expectedHash, name); - if (store->isValidPath(expectedStorePath)) - return store->toRealPath(expectedStorePath); + if (store->isValidPath(expectedStorePath)) { + CachedDownloadResult result; + result.path = store->toRealPath(expectedStorePath); + return result; + } } Path cacheDir = getCacheDir() + "/nix/tarballs"; @@ -803,6 +806,8 @@ Path Downloader::downloadCached(ref store, const string & url_, bool unpa bool skip = false; + CachedDownloadResult result; + if (pathExists(fileLink) && pathExists(dataFile)) { storePath = readLink(fileLink); store->addTempRoot(storePath); @@ -814,6 +819,7 @@ Path Downloader::downloadCached(ref store, const string & url_, bool unpa skip = true; if (effectiveUrl) *effectiveUrl = url_; + result.etag = ss[1]; } else if (!ss[1].empty()) { debug(format("verifying previous ETag '%1%'") % ss[1]); expectedETag = ss[1]; @@ -831,6 +837,7 @@ Path Downloader::downloadCached(ref store, const string & url_, bool unpa auto res = download(request); if (effectiveUrl) *effectiveUrl = res.effectiveUrl; + result.etag = res.etag; if (!res.cached) { ValidPathInfo info; @@ -852,6 +859,7 @@ Path Downloader::downloadCached(ref store, const string & url_, bool unpa } catch (DownloadError & e) { if (storePath.empty()) throw; printError(format("warning: %1%; using cached result") % e.msg()); + result.etag = expectedETag; } } @@ -885,7 +893,8 @@ Path Downloader::downloadCached(ref store, const string & url_, 
bool unpa url, expectedHash.to_string(), gotHash.to_string()); } - return store->toRealPath(storePath); + result.path = store->toRealPath(storePath); + return result; } diff --git a/src/libstore/download.hh b/src/libstore/download.hh index f0228f7d0..8acfe4e1a 100644 --- a/src/libstore/download.hh +++ b/src/libstore/download.hh @@ -41,6 +41,12 @@ struct DownloadResult uint64_t bodySize = 0; }; +struct CachedDownloadResult +{ + Path path; + std::optional etag; +}; + class Store; struct Downloader @@ -64,8 +70,10 @@ struct Downloader and is more recent than ‘tarball-ttl’ seconds. Otherwise, use the recorded ETag to verify if the server has a more recent version, and if so, download it to the Nix store. */ - Path downloadCached(ref store, const string & uri, bool unpack, string name = "", - const Hash & expectedHash = Hash(), string * effectiveUri = nullptr, int ttl = settings.tarballTtl); + CachedDownloadResult downloadCached( + ref store, const string & uri, bool unpack, string name = "", + const Hash & expectedHash = Hash(), string * effectiveUri = nullptr, + int ttl = settings.tarballTtl); enum Error { NotFound, Forbidden, Misc, Transient, Interrupted }; }; diff --git a/src/nix-channel/nix-channel.cc b/src/nix-channel/nix-channel.cc index 8b66cc7e3..7b23088a2 100755 --- a/src/nix-channel/nix-channel.cc +++ b/src/nix-channel/nix-channel.cc @@ -88,7 +88,7 @@ static void update(const StringSet & channelNames) // definition from a consistent location if the redirect changes mid-download. std::string effectiveUrl; auto dl = getDownloader(); - auto filename = dl->downloadCached(store, url, false, "", Hash(), &effectiveUrl, 0); + auto filename = dl->downloadCached(store, url, false, "", Hash(), &effectiveUrl, 0).path; url = chomp(std::move(effectiveUrl)); // If the URL contains a version number, append it to the name @@ -123,10 +123,10 @@ static void update(const StringSet & channelNames) // Download the channel tarball. auto fullURL = url + "/nixexprs.tar.xz"; try { - filename = dl->downloadCached(store, fullURL, false); + filename = dl->downloadCached(store, fullURL, false).path; } catch (DownloadError & e) { fullURL = url + "/nixexprs.tar.bz2"; - filename = dl->downloadCached(store, fullURL, false); + filename = dl->downloadCached(store, fullURL, false).path; } chomp(filename); } From d342de02b9f7ee07f22e1986af8d5c8eb325d8ba Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 25 Feb 2019 23:23:45 +0800 Subject: [PATCH 019/634] fetchFlake: Use infinite TTL when the revision is specified --- src/libexpr/primops/flake.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index f9a1a3d6a..3998f9ef9 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -113,7 +113,8 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef) ? *refData->ref : "master"); - auto result = getDownloader()->downloadCached(state.store, url, true, "source"); + auto result = getDownloader()->downloadCached(state.store, url, true, "source", + Hash(), nullptr, refData->rev ? 
1000000000 : settings.tarballTtl); if (!result.etag) throw Error("did not receive an ETag header from '%s'", url); From cfb6ab80cea7f0ed3f525e8120f2e569f963fa0e Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Thu, 21 Feb 2019 06:53:01 +0100 Subject: [PATCH 020/634] Implemented "nix flake info" --- src/libexpr/primops/flake.cc | 13 ------------- src/libexpr/primops/flake.hh | 14 ++++++++++++++ src/nix/command.hh | 11 +++++++++++ src/nix/flake.cc | 23 ++++++++++++++++++++++- 4 files changed, 47 insertions(+), 14 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 3998f9ef9..9f137a0b9 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -141,19 +141,6 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef) else abort(); } -struct Flake -{ - FlakeId id; - std::string description; - Path path; - std::vector requires; - std::unique_ptr lockFile; - Value * vProvides; // FIXME: gc - // commit hash - // date - // content hash -}; - static Flake getFlake(EvalState & state, const FlakeRef & flakeRef) { auto sourceInfo = fetchFlake(state, flakeRef); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index e504dc196..af28bc5b0 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -21,4 +21,18 @@ Value * makeFlakeRegistryValue(EvalState & state); Value * makeFlakeValue(EvalState & state, std::string flakeUri, Value & v); +struct Flake +{ + FlakeId id; + std::string description; + Path path; + std::vector requires; + std::unique_ptr lockFile; + Value * vProvides; // FIXME: gc + // commit hash + // date + // content hash +}; + +static Flake getFlake(EvalState & state, const FlakeRef & flakeRef); } diff --git a/src/nix/command.hh b/src/nix/command.hh index a08347945..b3248222e 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -34,6 +34,17 @@ struct Buildable typedef std::vector Buildables; +struct FlakeCommand : virtual Args, StoreCommand, MixEvalArgs +{ + std::string flakeUri; + +public: + FlakeCommand() + { + expectArg("flake-uri", &flakeUri); + } +}; + struct Installable { virtual std::string what() = 0; diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 9b36c3cbd..099425688 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -33,10 +33,31 @@ struct CmdFlakeList : StoreCommand, MixEvalArgs } }; +struct CmdFlakeInfo : FlakeCommand +{ + std::string name() override + { + return "info"; + } + + std::string description() override + { + return "list info about a given flake"; + } + + void run(nix::ref store) override + { + auto evalState = std::make_shared(searchPath, store); + nix::Flake flake = nix::getFlake(*evalState, FlakeRef(flakeUri)); + std::cout << "Location: " << flake.path << "\n"; + std::cout << "Description: " << flake.description << "\n"; + } +}; + struct CmdFlake : virtual MultiCommand, virtual Command { CmdFlake() - : MultiCommand({make_ref()}) + : MultiCommand({make_ref(), make_ref()}) { } From 9ff1a9ea65bdeb520becb843b8300a23fb88a5a9 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Wed, 27 Feb 2019 19:54:18 +0100 Subject: [PATCH 021/634] Implemented json flag for `nix flake info` --- src/libexpr/primops/flake.cc | 2 +- src/libexpr/primops/flake.hh | 2 +- src/nix/command.cc | 9 +++++++++ src/nix/command.hh | 7 +++++++ src/nix/flake.cc | 14 +++++++++++--- 5 files changed, 29 insertions(+), 5 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 9f137a0b9..ac0421549 100644 --- 
a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -141,7 +141,7 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef) else abort(); } -static Flake getFlake(EvalState & state, const FlakeRef & flakeRef) +Flake getFlake(EvalState & state, const FlakeRef & flakeRef) { auto sourceInfo = fetchFlake(state, flakeRef); debug("got flake source '%s' with revision %s", diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index af28bc5b0..194b969a2 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -34,5 +34,5 @@ struct Flake // content hash }; -static Flake getFlake(EvalState & state, const FlakeRef & flakeRef); +Flake getFlake(EvalState & state, const FlakeRef & flakeRef); } diff --git a/src/nix/command.cc b/src/nix/command.cc index 5967ab36c..e1e32aaae 100644 --- a/src/nix/command.cc +++ b/src/nix/command.cc @@ -27,6 +27,15 @@ void StoreCommand::run() run(getStore()); } +JsonFormattable::JsonFormattable() +{ + mkFlag() + .longName("json-formattable") + .shortName('j') + .description("output will be printed as json") + .handler([&]() { jsonFormatting = true; }); +} + StorePathsCommand::StorePathsCommand(bool recursive) : recursive(recursive) { diff --git a/src/nix/command.hh b/src/nix/command.hh index b3248222e..5c2f8c304 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -26,6 +26,13 @@ private: std::shared_ptr _store; }; +struct JsonFormattable : virtual Command +{ + bool jsonFormatting = false;; + + JsonFormattable(); +}; + struct Buildable { Path drvPath; // may be empty diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 099425688..22e5b297c 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -4,6 +4,7 @@ #include "shared.hh" #include "progress-bar.hh" #include "eval.hh" +#include using namespace nix; @@ -33,7 +34,7 @@ struct CmdFlakeList : StoreCommand, MixEvalArgs } }; -struct CmdFlakeInfo : FlakeCommand +struct CmdFlakeInfo : FlakeCommand, JsonFormattable { std::string name() override { @@ -49,8 +50,15 @@ struct CmdFlakeInfo : FlakeCommand { auto evalState = std::make_shared(searchPath, store); nix::Flake flake = nix::getFlake(*evalState, FlakeRef(flakeUri)); - std::cout << "Location: " << flake.path << "\n"; - std::cout << "Description: " << flake.description << "\n"; + if (jsonFormatting) { + nlohmann::json j; + j["location"] = flake.path; + j["description"] = flake.description; + std::cout << j.dump(4) << std::endl; + } else { + std::cout << "Location: " << flake.path << "\n"; + std::cout << "Description: " << flake.description << "\n"; + } } }; From 6542de98c298b6dc268b358166bd2f5bea2cc230 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Mon, 25 Feb 2019 13:46:37 +0100 Subject: [PATCH 022/634] Implemented writeRegistry --- src/libexpr/primops/flake.cc | 12 ++++++++++++ src/libexpr/primops/flake.hh | 2 ++ 2 files changed, 14 insertions(+) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index ac0421549..9d1da84f1 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -31,6 +31,18 @@ static std::unique_ptr readRegistry(const Path & path) return registry; } +/* Write the registry or lock file to a file. */ +static void writeRegistry(FlakeRegistry registry, Path path = "./flake.lock") +{ + nlohmann::json json = {}; + json["value"] = 0; // Not sure whether this should be 0. 
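+    // Note: readRegistry() above reads this field under the key "version" and
+    // requires it to be 1, so the value written here has to match for the
+    // resulting file to be readable back.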
+ json["flakes"] = {}; + for (auto elem : registry.entries) { + json["flakes"][elem.first] = elem.second.ref.to_string(); + } + writeFile(path, json.dump(4)); // The '4' is the number of spaces used in the indentation in the json file. +} + const FlakeRegistry & EvalState::getFlakeRegistry() { std::call_once(_flakeRegistryInit, [&]() diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 194b969a2..90c6bc7d2 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -35,4 +35,6 @@ struct Flake }; Flake getFlake(EvalState & state, const FlakeRef & flakeRef); + +void writeRegistry(FlakeRegistry); } From d4ee8afd59cd7935f59b730c432cf58460af8a84 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Thu, 21 Feb 2019 06:53:01 +0100 Subject: [PATCH 023/634] Implemented --flake flag for nix build Also fixed Eelco's PR comments --- src/libexpr/primops/flake.cc | 64 +++++++++++++++++++++++++++++---- src/libexpr/primops/flake.hh | 11 ++++-- src/libexpr/primops/flakeref.hh | 3 ++ src/libutil/util.cc | 1 - src/nix/build.cc | 15 ++++++++ src/nix/command.cc | 9 ----- src/nix/command.hh | 7 ---- src/nix/flake.cc | 29 +++++++++++++-- 8 files changed, 111 insertions(+), 28 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 9d1da84f1..df0845c24 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -3,7 +3,9 @@ #include "eval-inline.hh" #include "fetchGit.hh" #include "download.hh" +#include "args.hh" +#include #include #include #include @@ -32,10 +34,10 @@ static std::unique_ptr readRegistry(const Path & path) } /* Write the registry or lock file to a file. */ -static void writeRegistry(FlakeRegistry registry, Path path = "./flake.lock") +void writeRegistry(FlakeRegistry registry, Path path) { nlohmann::json json = {}; - json["value"] = 0; // Not sure whether this should be 0. + json["version"] = 1; json["flakes"] = {}; for (auto elem : registry.entries) { json["flakes"][elem.first] = elem.second.ref.to_string(); @@ -107,9 +109,21 @@ struct FlakeSourceInfo static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef) { - assert(flakeRef.isDirect()); + FlakeRef directFlakeRef = FlakeRef(flakeRef); + if (!flakeRef.isDirect()) + { + std::vector registries; + // 'pureEval' is a setting which cannot be changed in `nix flake`, + // but without flagging it off, we can't use any FlakeIds. + // if (!evalSettings.pureEval) { + registries.push_back(&state.getFlakeRegistry()); + // } + directFlakeRef = lookupFlake(state, flakeRef, registries); + } + assert(directFlakeRef.isDirect()); + // NOTE FROM NICK: I don't see why one wouldn't fetch FlakeId flakes.. - if (auto refData = std::get_if(&flakeRef.data)) { + if (auto refData = std::get_if(&directFlakeRef.data)) { // FIXME: require hash in pure mode. // FIXME: use regular /archive URLs instead? api.github.com @@ -141,7 +155,7 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef) return info; } - else if (auto refData = std::get_if(&flakeRef.data)) { + else if (auto refData = std::get_if(&directFlakeRef.data)) { auto gitInfo = exportGit(state.store, refData->uri, refData->ref, refData->rev ? 
refData->rev->to_string(Base16, false) : "", "source"); FlakeSourceInfo info; @@ -165,7 +179,16 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) if (state.allowedPaths) state.allowedPaths->insert(flakePath); - Flake flake; + FlakeRef newFlakeRef(flakeRef); + if (std::get_if(&newFlakeRef.data)) { + FlakeSourceInfo srcInfo = fetchFlake(state, newFlakeRef); + if (srcInfo.rev) { + std::string uri = flakeRef.to_string(); + newFlakeRef = FlakeRef(uri + "/" + srcInfo.rev->to_string()); + } + } + + Flake flake(newFlakeRef); Value vInfo; state.evalFile(flakePath + "/flake.nix", vInfo); // FIXME: symlink attack @@ -258,6 +281,35 @@ static std::tuple> resolveFlake(EvalState & st return {*topFlakeId, std::move(done)}; } +FlakeRegistry updateLockFile(EvalState & evalState, FlakeRef & flakeRef) +{ + FlakeRegistry newLockFile; + std::map myDependencyMap = get<1>(resolveFlake(evalState, flakeRef, false)); + // Nick assumed that "topRefPure" means that the Flake for flakeRef can be + // fetched purely. + for (auto const& require : myDependencyMap) { + FlakeRegistry::Entry entry = FlakeRegistry::Entry(require.second.ref); + // The FlakeRefs are immutable because they come out of the Flake objects, + // not from the requires. + newLockFile.entries.insert(std::pair(require.first, entry)); + } + return newLockFile; +} + +void updateLockFile(EvalState & state, std::string path) +{ + // 'path' is the path to the local flake repo. + FlakeRef flakeRef = FlakeRef(path); + if (std::get_if(&flakeRef.data)) { + FlakeRegistry newLockFile = updateLockFile(state, flakeRef); + writeRegistry(newLockFile, path + "/flake.lock"); + } else if (std::get_if(&flakeRef.data)) { + throw UsageError("You can only update local flakes, not flakes on GitHub."); + } else { + throw UsageError("You can only update local flakes, not flakes through their FlakeId."); + } +} + Value * makeFlakeValue(EvalState & state, std::string flakeUri, Value & v) { // FIXME: temporary hack to make the default installation source diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 90c6bc7d2..b3a755311 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -13,6 +13,7 @@ struct FlakeRegistry struct Entry { FlakeRef ref; + Entry(const FlakeRef & flakeRef) : ref(flakeRef) {}; }; std::map entries; }; @@ -21,9 +22,12 @@ Value * makeFlakeRegistryValue(EvalState & state); Value * makeFlakeValue(EvalState & state, std::string flakeUri, Value & v); +void writeRegistry(FlakeRegistry, Path); + struct Flake { FlakeId id; + FlakeRef ref; std::string description; Path path; std::vector requires; @@ -32,9 +36,12 @@ struct Flake // commit hash // date // content hash + Flake(FlakeRef & flakeRef) : ref(flakeRef) {}; }; -Flake getFlake(EvalState & state, const FlakeRef & flakeRef); +Flake getFlake(EvalState &, const FlakeRef &); -void writeRegistry(FlakeRegistry); +FlakeRegistry updateLockFile(EvalState &, Flake &); + +void updateLockFile(EvalState &, std::string); } diff --git a/src/libexpr/primops/flakeref.hh b/src/libexpr/primops/flakeref.hh index ad0cf8630..4d1756b49 100644 --- a/src/libexpr/primops/flakeref.hh +++ b/src/libexpr/primops/flakeref.hh @@ -129,6 +129,9 @@ struct FlakeRef // Parse a flake URI. FlakeRef(const std::string & uri); + // Default constructor + FlakeRef(const FlakeRef & flakeRef) : data(flakeRef.data) {}; + /* Unify two flake references so that the resulting reference combines the information from both. 
For example, "nixpkgs/" and "github:NixOS/nixpkgs" unifies to diff --git a/src/libutil/util.cc b/src/libutil/util.cc index e3dcd246c..b0a2b853e 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -344,7 +344,6 @@ void writeFile(const Path & path, Source & source, mode_t mode) } } - string readLine(int fd) { string s; diff --git a/src/nix/build.cc b/src/nix/build.cc index b329ac38a..12ef08679 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -1,3 +1,5 @@ +#include "primops/flake.hh" +#include "eval.hh" #include "command.hh" #include "common-args.hh" #include "shared.hh" @@ -9,6 +11,8 @@ struct CmdBuild : MixDryRun, InstallablesCommand { Path outLink = "result"; + std::optional flakeUri = std::nullopt; + CmdBuild() { mkFlag() @@ -22,6 +26,11 @@ struct CmdBuild : MixDryRun, InstallablesCommand .longName("no-link") .description("do not create a symlink to the build result") .set(&outLink, Path("")); + + mkFlag() + .longName("flake") + .description("update lock file of given flake") + .dest(&flakeUri); } std::string name() override @@ -52,6 +61,8 @@ struct CmdBuild : MixDryRun, InstallablesCommand { auto buildables = build(store, dryRun ? DryRun : Build, installables); + auto evalState = std::make_shared(searchPath, store); + if (dryRun) return; for (size_t i = 0; i < buildables.size(); ++i) { @@ -66,6 +77,10 @@ struct CmdBuild : MixDryRun, InstallablesCommand store2->addPermRoot(output.second, absPath(symlink), true); } } + + if (flakeUri) { + updateLockFile(*evalState, *flakeUri); + } } }; diff --git a/src/nix/command.cc b/src/nix/command.cc index e1e32aaae..5967ab36c 100644 --- a/src/nix/command.cc +++ b/src/nix/command.cc @@ -27,15 +27,6 @@ void StoreCommand::run() run(getStore()); } -JsonFormattable::JsonFormattable() -{ - mkFlag() - .longName("json-formattable") - .shortName('j') - .description("output will be printed as json") - .handler([&]() { jsonFormatting = true; }); -} - StorePathsCommand::StorePathsCommand(bool recursive) : recursive(recursive) { diff --git a/src/nix/command.hh b/src/nix/command.hh index 5c2f8c304..b3248222e 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -26,13 +26,6 @@ private: std::shared_ptr _store; }; -struct JsonFormattable : virtual Command -{ - bool jsonFormatting = false;; - - JsonFormattable(); -}; - struct Buildable { Path drvPath; // may be empty diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 22e5b297c..6cef38936 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -34,7 +34,28 @@ struct CmdFlakeList : StoreCommand, MixEvalArgs } }; -struct CmdFlakeInfo : FlakeCommand, JsonFormattable +struct CmdFlakeUpdate : FlakeCommand +{ + std::string name() override + { + return "update"; + } + + std::string description() override + { + return "update flake lock file"; + } + + void run(nix::ref store) override + { + auto evalState = std::make_shared(searchPath, store); + + if (flakeUri == "") flakeUri = absPath("./flake.nix"); + updateLockFile(*evalState, flakeUri); + } +}; + +struct CmdFlakeInfo : FlakeCommand, MixJSON { std::string name() override { @@ -50,7 +71,7 @@ struct CmdFlakeInfo : FlakeCommand, JsonFormattable { auto evalState = std::make_shared(searchPath, store); nix::Flake flake = nix::getFlake(*evalState, FlakeRef(flakeUri)); - if (jsonFormatting) { + if (json) { nlohmann::json j; j["location"] = flake.path; j["description"] = flake.description; @@ -65,7 +86,9 @@ struct CmdFlakeInfo : FlakeCommand, JsonFormattable struct CmdFlake : virtual MultiCommand, virtual Command { CmdFlake() - : MultiCommand({make_ref(), 
make_ref()}) + : MultiCommand({make_ref() + , make_ref() + , make_ref()}) { } From e007f367bd605ad14ddf84d1d5ad611aa427d338 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Thu, 21 Feb 2019 06:53:01 +0100 Subject: [PATCH 024/634] Fixed minor things --- src/libexpr/primops/flake.cc | 6 +++--- src/nix/build.cc | 13 ++++++------- src/nix/command.hh | 21 +++++++++++++++------ src/nix/flake.cc | 11 ++++++++--- src/nix/installables.cc | 1 + 5 files changed, 33 insertions(+), 19 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index df0845c24..48a036875 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -299,14 +299,14 @@ FlakeRegistry updateLockFile(EvalState & evalState, FlakeRef & flakeRef) void updateLockFile(EvalState & state, std::string path) { // 'path' is the path to the local flake repo. - FlakeRef flakeRef = FlakeRef(path); + FlakeRef flakeRef = FlakeRef("file://" + path); if (std::get_if(&flakeRef.data)) { FlakeRegistry newLockFile = updateLockFile(state, flakeRef); writeRegistry(newLockFile, path + "/flake.lock"); } else if (std::get_if(&flakeRef.data)) { - throw UsageError("You can only update local flakes, not flakes on GitHub."); + throw UsageError("you can only update local flakes, not flakes on GitHub"); } else { - throw UsageError("You can only update local flakes, not flakes through their FlakeId."); + throw UsageError("you can only update local flakes, not flakes through their FlakeId"); } } diff --git a/src/nix/build.cc b/src/nix/build.cc index 12ef08679..5ab22e26c 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -11,7 +11,7 @@ struct CmdBuild : MixDryRun, InstallablesCommand { Path outLink = "result"; - std::optional flakeUri = std::nullopt; + std::optional gitRepo = std::nullopt; CmdBuild() { @@ -28,9 +28,9 @@ struct CmdBuild : MixDryRun, InstallablesCommand .set(&outLink, Path("")); mkFlag() - .longName("flake") - .description("update lock file of given flake") - .dest(&flakeUri); + .longName("update-lock-file") + .description("update the lock file") + .dest(&gitRepo); } std::string name() override @@ -78,9 +78,8 @@ struct CmdBuild : MixDryRun, InstallablesCommand } } - if (flakeUri) { - updateLockFile(*evalState, *flakeUri); - } + if(gitRepo) + updateLockFile(*evalState, *gitRepo); } }; diff --git a/src/nix/command.hh b/src/nix/command.hh index b3248222e..c58d5d56e 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -34,15 +34,24 @@ struct Buildable typedef std::vector Buildables; +struct GitRepoCommand : virtual Args +{ + std::string gitPath = absPath("."); + + GitRepoCommand () + { + expectArg("git-path", &gitPath, true); + } +}; + struct FlakeCommand : virtual Args, StoreCommand, MixEvalArgs { - std::string flakeUri; + std::string flakeUri; -public: - FlakeCommand() - { - expectArg("flake-uri", &flakeUri); - } + FlakeCommand() + { + expectArg("flake-uri", &flakeUri); + } }; struct Installable diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 6cef38936..a5a1d34db 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -34,7 +34,7 @@ struct CmdFlakeList : StoreCommand, MixEvalArgs } }; -struct CmdFlakeUpdate : FlakeCommand +struct CmdFlakeUpdate : StoreCommand, GitRepoCommand, MixEvalArgs { std::string name() override { @@ -51,7 +51,12 @@ struct CmdFlakeUpdate : FlakeCommand auto evalState = std::make_shared(searchPath, store); if (flakeUri == "") flakeUri = absPath("./flake.nix"); - updateLockFile(*evalState, flakeUri); + int result = updateLockFile(*evalState, flakeUri); + if 
(result == 1) { + std::cout << "You can only update local flakes, not flakes on GitHub.\n"; + } else if (result == 2) { + std::cout << "You can only update local flakes, not flakes through their FlakeId.\n"; + } } }; @@ -77,8 +82,8 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON j["description"] = flake.description; std::cout << j.dump(4) << std::endl; } else { - std::cout << "Location: " << flake.path << "\n"; std::cout << "Description: " << flake.description << "\n"; + std::cout << "Location: " << flake.path << "\n"; } } }; diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 0453c72c2..21e9e73b8 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -234,6 +234,7 @@ Buildables build(ref store, RealiseMode mode, PathSet pathsToBuild; for (auto & i : installables) { + std::cout << i->what() << std::endl; for (auto & b : i->toBuildables()) { if (b.drvPath != "") { StringSet outputNames; From 5e4d92d267c080bcb81168e37429bbb56bc39fb2 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Sun, 10 Mar 2019 07:05:05 +0100 Subject: [PATCH 025/634] Issue #15 is finished --- src/libexpr/primops/flake.cc | 13 ++-- src/libexpr/primops/flake.hh | 5 ++ src/libexpr/primops/flakeref.cc | 15 +++++ src/libexpr/primops/flakeref.hh | 3 +- src/nix/command.hh | 2 +- src/nix/flake.cc | 113 +++++++++++++++++++++++++++++--- 6 files changed, 136 insertions(+), 15 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 48a036875..b74e0b4b7 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -12,9 +12,14 @@ namespace nix { +Path getUserRegistryPath() +{ + return getHome() + "/.config/nix/registry.json"; +} + /* Read the registry or a lock file. (Currently they have an identical format. */ -static std::unique_ptr readRegistry(const Path & path) +std::unique_ptr readRegistry(const Path & path) { auto registry = std::make_unique(); @@ -40,7 +45,7 @@ void writeRegistry(FlakeRegistry registry, Path path) json["version"] = 1; json["flakes"] = {}; for (auto elem : registry.entries) { - json["flakes"][elem.first] = elem.second.ref.to_string(); + json["flakes"][elem.first] = { {"uri", elem.second.ref.to_string()} }; } writeFile(path, json.dump(4)); // The '4' is the number of spaces used in the indentation in the json file. 
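+    // Illustrative sketch of the file this produces (the flake id and uri are just examples):
+    //   { "version": 1, "flakes": { "nixpkgs": { "uri": "github:NixOS/nixpkgs" } } }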
} @@ -183,8 +188,8 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) if (std::get_if(&newFlakeRef.data)) { FlakeSourceInfo srcInfo = fetchFlake(state, newFlakeRef); if (srcInfo.rev) { - std::string uri = flakeRef.to_string(); - newFlakeRef = FlakeRef(uri + "/" + srcInfo.rev->to_string()); + std::string uri = flakeRef.baseRef().to_string(); + newFlakeRef = FlakeRef(uri + "/" + srcInfo.rev->to_string(Base16, false)); } } diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index b3a755311..4e49becc7 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -14,14 +14,19 @@ struct FlakeRegistry { FlakeRef ref; Entry(const FlakeRef & flakeRef) : ref(flakeRef) {}; + Entry operator=(const Entry & entry) { return Entry(entry.ref); } }; std::map entries; }; +Path getUserRegistryPath(); + Value * makeFlakeRegistryValue(EvalState & state); Value * makeFlakeValue(EvalState & state, std::string flakeUri, Value & v); +std::unique_ptr readRegistry(const Path &); + void writeRegistry(FlakeRegistry, Path); struct Flake diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index a2700f102..8e7c1f8df 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -152,4 +152,19 @@ bool FlakeRef::isImmutable() const else abort(); } +FlakeRef FlakeRef::baseRef() const // Removes the ref and rev from a FlakeRef. +{ + FlakeRef result(*this); + if (auto refData = std::get_if(&result.data)) { + refData->ref = std::nullopt; + refData->rev = std::nullopt; + } else if (auto refData = std::get_if(&result.data)) { + refData->ref = std::nullopt; + refData->rev = std::nullopt; + } else if (auto refData = std::get_if(&result.data)) { + refData->ref = std::nullopt; + refData->rev = std::nullopt; + } + return result; +} } diff --git a/src/libexpr/primops/flakeref.hh b/src/libexpr/primops/flakeref.hh index 4d1756b49..fb365e101 100644 --- a/src/libexpr/primops/flakeref.hh +++ b/src/libexpr/primops/flakeref.hh @@ -153,6 +153,7 @@ struct FlakeRef /* Check whether this is an "immutable" flake reference, that is, one that contains a commit hash or content hash. 
*/ bool isImmutable() const; -}; + FlakeRef baseRef() const; +}; } diff --git a/src/nix/command.hh b/src/nix/command.hh index c58d5d56e..ffe64ccb7 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -44,7 +44,7 @@ struct GitRepoCommand : virtual Args } }; -struct FlakeCommand : virtual Args, StoreCommand, MixEvalArgs +struct FlakeCommand : virtual Args { std::string flakeUri; diff --git a/src/nix/flake.cc b/src/nix/flake.cc index a5a1d34db..fda903944 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -50,17 +50,12 @@ struct CmdFlakeUpdate : StoreCommand, GitRepoCommand, MixEvalArgs { auto evalState = std::make_shared(searchPath, store); - if (flakeUri == "") flakeUri = absPath("./flake.nix"); - int result = updateLockFile(*evalState, flakeUri); - if (result == 1) { - std::cout << "You can only update local flakes, not flakes on GitHub.\n"; - } else if (result == 2) { - std::cout << "You can only update local flakes, not flakes through their FlakeId.\n"; - } + if (gitPath == "") gitPath = absPath("."); + updateLockFile(*evalState, gitPath); } }; -struct CmdFlakeInfo : FlakeCommand, MixJSON +struct CmdFlakeInfo : FlakeCommand, MixJSON, MixEvalArgs, StoreCommand { std::string name() override { @@ -88,12 +83,112 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON } }; +struct CmdFlakeAdd : MixEvalArgs, Command +{ + std::string flakeId; + std::string flakeUri; + + std::string name() override + { + return "add"; + } + + std::string description() override + { + return "upsert flake in user flake registry"; + } + + CmdFlakeAdd() + { + expectArg("flake-id", &flakeId); + expectArg("flake-uri", &flakeUri); + } + + void run() override + { + FlakeRef newFlakeRef(flakeUri); + Path userRegistryPath = getUserRegistryPath(); + auto userRegistry = readRegistry(userRegistryPath); + FlakeRegistry::Entry entry(newFlakeRef); + userRegistry->entries.erase(flakeId); + userRegistry->entries.insert_or_assign(flakeId, newFlakeRef); + writeRegistry(*userRegistry, userRegistryPath); + } +}; + +struct CmdFlakeRemove : virtual Args, MixEvalArgs, Command +{ + std::string flakeId; + + std::string name() override + { + return "remove"; + } + + std::string description() override + { + return "remove flake from user flake registry"; + } + + CmdFlakeRemove() + { + expectArg("flake-id", &flakeId); + } + + void run() override + { + Path userRegistryPath = getUserRegistryPath(); + auto userRegistry = readRegistry(userRegistryPath); + userRegistry->entries.erase(flakeId); + writeRegistry(*userRegistry, userRegistryPath); + } +}; + +struct CmdFlakePin : virtual Args, StoreCommand, MixEvalArgs +{ + std::string flakeId; + + std::string name() override + { + return "pin"; + } + + std::string description() override + { + return "pin flake require in user flake registry"; + } + + CmdFlakePin() + { + expectArg("flake-id", &flakeId); + } + + void run(nix::ref store) override + { + auto evalState = std::make_shared(searchPath, store); + + Path userRegistryPath = getUserRegistryPath(); + FlakeRegistry userRegistry = *readRegistry(userRegistryPath); + auto it = userRegistry.entries.find(flakeId); + if (it != userRegistry.entries.end()) { + FlakeRef oldRef = it->second.ref; + it->second.ref = getFlake(*evalState, oldRef).ref; + // The 'ref' in 'flake' is immutable. 
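+            // (i.e. the mutable entry is replaced by the immutable ref recorded in the
+            //  fetched flake, so later lookups of this id resolve to a fixed revision.)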
+ writeRegistry(userRegistry, userRegistryPath); + } else + throw Error("the flake identifier '%s' does not exist in the user registry", flakeId); + } +}; + struct CmdFlake : virtual MultiCommand, virtual Command { CmdFlake() : MultiCommand({make_ref() + , make_ref() , make_ref() - , make_ref()}) + , make_ref() + , make_ref() + , make_ref()}) { } From a554f523db34a5d6a8281c5228acfc128a8bd589 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Thu, 21 Mar 2019 09:30:16 +0100 Subject: [PATCH 026/634] Combining registries properly --- src/libexpr/eval.hh | 4 +- src/libexpr/primops/flake.cc | 104 +++++++++++++++++++++-------------- src/libexpr/primops/flake.hh | 4 +- src/nix/flake.cc | 8 ++- 4 files changed, 73 insertions(+), 47 deletions(-) diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index 35c01b97a..f6c894cad 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -316,10 +316,10 @@ private: public: - const FlakeRegistry & getFlakeRegistry(); + const std::vector> getFlakeRegistries(); private: - std::unique_ptr _flakeRegistry; + std::shared_ptr _flakeRegistry; std::once_flag _flakeRegistryInit; }; diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index b74e0b4b7..9a528ce82 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -12,16 +12,11 @@ namespace nix { -Path getUserRegistryPath() -{ - return getHome() + "/.config/nix/registry.json"; -} - /* Read the registry or a lock file. (Currently they have an identical format. */ -std::unique_ptr readRegistry(const Path & path) +std::shared_ptr readRegistry(const Path & path) { - auto registry = std::make_unique(); + auto registry = std::make_shared(); auto json = nlohmann::json::parse(readFile(path)); @@ -50,37 +45,71 @@ void writeRegistry(FlakeRegistry registry, Path path) writeFile(path, json.dump(4)); // The '4' is the number of spaces used in the indentation in the json file. } -const FlakeRegistry & EvalState::getFlakeRegistry() +Path getUserRegistryPath() { - std::call_once(_flakeRegistryInit, [&]() - { -#if 0 - auto registryUri = "file:///home/eelco/Dev/gists/nix-flakes/registry.json"; + return getHome() + "/.config/nix/registry.json"; +} - auto registryFile = getDownloader()->download(DownloadRequest(registryUri)); -#endif +std::shared_ptr getGlobalRegistry() +{ + // TODO: Make a global registry, and return it here. + return std::make_shared(); +} - auto registryFile = settings.nixDataDir + "/nix/flake-registry.json"; +std::shared_ptr getUserRegistry() +{ + return readRegistry(getUserRegistryPath()); +} - _flakeRegistry = readRegistry(registryFile); - }); +// Project-specific registry saved in flake-registry.json. +std::shared_ptr getLocalRegistry() +{ + Path registryFile = settings.nixDataDir + "/nix/flake-registry.json"; + return readRegistry(registryFile); +} - return *_flakeRegistry; +std::shared_ptr getFlagRegistry() +{ + return std::make_shared(); + // TODO: Implement this once the right flags are implemented. +} + +// This always returns a vector with globalReg, userReg, localReg, flakeReg. +// If one of them doesn't exist, the registry is left empty but does exist. 
+const std::vector> EvalState::getFlakeRegistries() +{ + std::vector> registries; + if (!evalSettings.pureEval) { + registries.push_back(std::make_shared()); // global + registries.push_back(std::make_shared()); // user + registries.push_back(std::make_shared()); // local + } else { + registries.push_back(getGlobalRegistry()); + registries.push_back(getUserRegistry()); + registries.push_back(getLocalRegistry()); + } + registries.push_back(getFlagRegistry()); + return registries; } Value * makeFlakeRegistryValue(EvalState & state) { auto v = state.allocValue(); - auto registry = state.getFlakeRegistry(); + auto registries = state.getFlakeRegistries(); - state.mkAttrs(*v, registry.entries.size()); + int size = 0; + for (auto registry : registries) + size += registry->entries.size(); + state.mkAttrs(*v, size); - for (auto & entry : registry.entries) { - auto vEntry = state.allocAttr(*v, entry.first); - state.mkAttrs(*vEntry, 2); - mkString(*state.allocAttr(*vEntry, state.symbols.create("uri")), entry.second.ref.to_string()); - vEntry->attrs->sort(); + for (auto & registry : registries) { + for (auto & entry : registry->entries) { + auto vEntry = state.allocAttr(*v, entry.first); + state.mkAttrs(*vEntry, 2); + mkString(*state.allocAttr(*vEntry, state.symbols.create("uri")), entry.second.ref.to_string()); + vEntry->attrs->sort(); + } } v->attrs->sort(); @@ -89,7 +118,7 @@ Value * makeFlakeRegistryValue(EvalState & state) } static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, - std::vector registries) + std::vector> registries) { if (auto refData = std::get_if(&flakeRef.data)) { for (auto registry : registries) { @@ -117,13 +146,7 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef) FlakeRef directFlakeRef = FlakeRef(flakeRef); if (!flakeRef.isDirect()) { - std::vector registries; - // 'pureEval' is a setting which cannot be changed in `nix flake`, - // but without flagging it off, we can't use any FlakeIds. - // if (!evalSettings.pureEval) { - registries.push_back(&state.getFlakeRegistry()); - // } - directFlakeRef = lookupFlake(state, flakeRef, registries); + directFlakeRef = lookupFlake(state, flakeRef, state.getFlakeRegistries()); } assert(directFlakeRef.isDirect()); // NOTE FROM NICK: I don't see why one wouldn't fetch FlakeId flakes.. @@ -246,11 +269,8 @@ static std::tuple> resolveFlake(EvalState & st std::optional topFlakeId; /// FIXME: ambiguous todo.push({topRef, true}); - std::vector registries; - FlakeRegistry localRegistry; - registries.push_back(&localRegistry); - if (!evalSettings.pureEval) - registries.push_back(&state.getFlakeRegistry()); + std::vector> registries = state.getFlakeRegistries(); + std::shared_ptr localRegistry = registries.at(2); while (!todo.empty()) { auto [flakeRef, toplevel] = todo.front(); @@ -259,6 +279,7 @@ static std::tuple> resolveFlake(EvalState & st if (auto refData = std::get_if(&flakeRef.data)) { if (done.count(refData->id)) continue; // optimization flakeRef = lookupFlake(state, flakeRef, registries); + // This is why we need the `registries`. } if (evalSettings.pureEval && !flakeRef.isImmutable() && (!toplevel || !impureTopRef)) @@ -273,10 +294,13 @@ static std::tuple> resolveFlake(EvalState & st for (auto & require : flake.requires) todo.push({require, false}); + // The following piece of code basically adds the FlakeRefs from + // the lockfiles of dependencies to the localRegistry. This is used + // to resolve future `FlakeId`s, in `lookupFlake` a bit above this. 
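+            // (Entries already present in the local registry are kept as-is;
+            //  lock-file entries never overwrite them.)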
if (flake.lockFile) for (auto & entry : flake.lockFile->entries) { - if (localRegistry.entries.count(entry.first)) continue; - localRegistry.entries.emplace(entry.first, entry.second); + if (localRegistry->entries.count(entry.first)) continue; + localRegistry->entries.emplace(entry.first, entry.second); } done.emplace(flake.id, std::move(flake)); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 4e49becc7..53cea1cc2 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -25,7 +25,7 @@ Value * makeFlakeRegistryValue(EvalState & state); Value * makeFlakeValue(EvalState & state, std::string flakeUri, Value & v); -std::unique_ptr readRegistry(const Path &); +std::shared_ptr readRegistry(const Path &); void writeRegistry(FlakeRegistry, Path); @@ -36,7 +36,7 @@ struct Flake std::string description; Path path; std::vector requires; - std::unique_ptr lockFile; + std::shared_ptr lockFile; Value * vProvides; // FIXME: gc // commit hash // date diff --git a/src/nix/flake.cc b/src/nix/flake.cc index fda903944..470dfdc08 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -24,12 +24,14 @@ struct CmdFlakeList : StoreCommand, MixEvalArgs { auto evalState = std::make_shared(searchPath, store); - auto registry = evalState->getFlakeRegistry(); + auto registries = evalState->getFlakeRegistries(); stopProgressBar(); - for (auto & entry : registry.entries) { - std::cout << entry.first << " " << entry.second.ref.to_string() << "\n"; + for (auto & registry : registries) { + for (auto & entry : registry->entries) { + std::cout << entry.first << " " << entry.second.ref.to_string() << "\n"; + } } } }; From f9c7176a87ccc71b719689cb28f8bc1bfb5354e3 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 26 Mar 2019 12:48:57 +0100 Subject: [PATCH 027/634] nix flake add: Handle ~/.config/nix not existing Fixes $ nix flake add fnord github:edolstra/fnord error: opening file '/home/eelco/.config/nix/registry.json': No such file or directory --- src/libexpr/primops/flake.cc | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index b74e0b4b7..00eeba632 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -23,16 +23,20 @@ std::unique_ptr readRegistry(const Path & path) { auto registry = std::make_unique(); - auto json = nlohmann::json::parse(readFile(path)); + try { + auto json = nlohmann::json::parse(readFile(path)); - auto version = json.value("version", 0); - if (version != 1) - throw Error("flake registry '%s' has unsupported version %d", path, version); + auto version = json.value("version", 0); + if (version != 1) + throw Error("flake registry '%s' has unsupported version %d", path, version); - auto flakes = json["flakes"]; - for (auto i = flakes.begin(); i != flakes.end(); ++i) { - FlakeRegistry::Entry entry{FlakeRef(i->value("uri", ""))}; - registry->entries.emplace(i.key(), entry); + auto flakes = json["flakes"]; + for (auto i = flakes.begin(); i != flakes.end(); ++i) { + FlakeRegistry::Entry entry{FlakeRef(i->value("uri", ""))}; + registry->entries.emplace(i.key(), entry); + } + } catch (SysError & e) { + if (e.errNo != ENOENT) throw; } return registry; @@ -47,6 +51,7 @@ void writeRegistry(FlakeRegistry registry, Path path) for (auto elem : registry.entries) { json["flakes"][elem.first] = { {"uri", elem.second.ref.to_string()} }; } + createDirs(dirOf(path)); writeFile(path, json.dump(4)); // The '4' is the number of spaces used in the 
indentation in the json file. } From be7fd6359559717b83833d96d4b6dc38ceb83092 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 26 Mar 2019 14:25:43 +0100 Subject: [PATCH 028/634] Remove debug line --- src/nix/build.cc | 2 +- src/nix/installables.cc | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/nix/build.cc b/src/nix/build.cc index 5ab22e26c..da7c7f614 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -78,7 +78,7 @@ struct CmdBuild : MixDryRun, InstallablesCommand } } - if(gitRepo) + if (gitRepo) updateLockFile(*evalState, *gitRepo); } }; diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 21e9e73b8..0453c72c2 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -234,7 +234,6 @@ Buildables build(ref store, RealiseMode mode, PathSet pathsToBuild; for (auto & i : installables) { - std::cout << i->what() << std::endl; for (auto & b : i->toBuildables()) { if (b.drvPath != "") { StringSet outputNames; From 6b0ca8e803710342af70e257935724c5ad84ca04 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 8 Apr 2019 14:20:58 +0200 Subject: [PATCH 029/634] findAlongAttrPath(): Throw AttrPathNotFound --- src/libexpr/attr-path.cc | 4 ++-- src/libexpr/attr-path.hh | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/libexpr/attr-path.cc b/src/libexpr/attr-path.cc index b0f80db32..832235cfd 100644 --- a/src/libexpr/attr-path.cc +++ b/src/libexpr/attr-path.cc @@ -70,7 +70,7 @@ Value * findAlongAttrPath(EvalState & state, const string & attrPath, Bindings::iterator a = v->attrs->find(state.symbols.create(attr)); if (a == v->attrs->end()) - throw Error(format("attribute '%1%' in selection path '%2%' not found") % attr % attrPath); + throw AttrPathNotFound("attribute '%1%' in selection path '%2%' not found", attr, attrPath); v = &*a->value; } @@ -82,7 +82,7 @@ Value * findAlongAttrPath(EvalState & state, const string & attrPath, % attrPath % showType(*v)); if (attrIndex >= v->listSize()) - throw Error(format("list index %1% in selection path '%2%' is out of range") % attrIndex % attrPath); + throw AttrPathNotFound("list index %1% in selection path '%2%' is out of range", attrIndex, attrPath); v = v->listElems()[attrIndex]; } diff --git a/src/libexpr/attr-path.hh b/src/libexpr/attr-path.hh index 46a341950..1eae64625 100644 --- a/src/libexpr/attr-path.hh +++ b/src/libexpr/attr-path.hh @@ -7,6 +7,8 @@ namespace nix { +MakeError(AttrPathNotFound, Error); + Value * findAlongAttrPath(EvalState & state, const string & attrPath, Bindings & autoArgs, Value & vIn); From 154244adc6c9831e00a41bf7799a2d29c6a3a3b4 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 8 Apr 2019 14:21:13 +0200 Subject: [PATCH 030/634] nix: New installables syntax The general syntax for an installable is now :. The attrpath is relative to the flake's 'provides.packages' or 'provides' if the former doesn't yield a result. E.g. $ nix build nixpkgs:hello is equivalent to $ nix build nixpkgs:packages.hello Also, ':' can be omitted, in which case it defaults to 'nixpkgs', e.g. 
$ nix build hello --- src/libexpr/primops/flake.cc | 55 +++++++++------------------- src/libexpr/primops/flake.hh | 2 +- src/nix/installables.cc | 71 +++++++++++++++++++++++++++--------- 3 files changed, 72 insertions(+), 56 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index a8d46825f..dedd2f737 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -57,8 +57,9 @@ Path getUserRegistryPath() std::shared_ptr getGlobalRegistry() { - // TODO: Make a global registry, and return it here. - return std::make_shared(); + // FIXME: get from nixos.org. + Path registryFile = settings.nixDataDir + "/nix/flake-registry.json"; + return readRegistry(registryFile); } std::shared_ptr getUserRegistry() @@ -66,33 +67,17 @@ std::shared_ptr getUserRegistry() return readRegistry(getUserRegistryPath()); } -// Project-specific registry saved in flake-registry.json. -std::shared_ptr getLocalRegistry() -{ - Path registryFile = settings.nixDataDir + "/nix/flake-registry.json"; - return readRegistry(registryFile); -} - std::shared_ptr getFlagRegistry() { return std::make_shared(); // TODO: Implement this once the right flags are implemented. } -// This always returns a vector with globalReg, userReg, localReg, flakeReg. -// If one of them doesn't exist, the registry is left empty but does exist. const std::vector> EvalState::getFlakeRegistries() { std::vector> registries; - if (!evalSettings.pureEval) { - registries.push_back(std::make_shared()); // global - registries.push_back(std::make_shared()); // user - registries.push_back(std::make_shared()); // local - } else { - registries.push_back(getGlobalRegistry()); - registries.push_back(getUserRegistry()); - registries.push_back(getLocalRegistry()); - } + registries.push_back(getGlobalRegistry()); + registries.push_back(getUserRegistry()); registries.push_back(getFlagRegistry()); return registries; } @@ -149,8 +134,7 @@ struct FlakeSourceInfo static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef) { FlakeRef directFlakeRef = FlakeRef(flakeRef); - if (!flakeRef.isDirect()) - { + if (!flakeRef.isDirect()) { directFlakeRef = lookupFlake(state, flakeRef, state.getFlakeRegistries()); } assert(directFlakeRef.isDirect()); @@ -274,8 +258,8 @@ static std::tuple> resolveFlake(EvalState & st std::optional topFlakeId; /// FIXME: ambiguous todo.push({topRef, true}); - std::vector> registries = state.getFlakeRegistries(); - std::shared_ptr localRegistry = registries.at(2); + auto registries = state.getFlakeRegistries(); + //std::shared_ptr localRegistry = registries.at(2); while (!todo.empty()) { auto [flakeRef, toplevel] = todo.front(); @@ -283,12 +267,15 @@ static std::tuple> resolveFlake(EvalState & st if (auto refData = std::get_if(&flakeRef.data)) { if (done.count(refData->id)) continue; // optimization - flakeRef = lookupFlake(state, flakeRef, registries); + flakeRef = lookupFlake(state, flakeRef, + !evalSettings.pureEval || (toplevel && impureTopRef) ? registries : std::vector>()); // This is why we need the `registries`. 
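+                // (So in pure evaluation mode, unless this is the top-level ref and it is
+                //  explicitly allowed to be impure, an empty registry list is passed and
+                //  symbolic flake ids cannot be resolved.)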
} +#if 0 if (evalSettings.pureEval && !flakeRef.isImmutable() && (!toplevel || !impureTopRef)) throw Error("mutable flake '%s' is not allowed in pure mode; use --no-pure-eval to disable", flakeRef.to_string()); +#endif auto flake = getFlake(state, flakeRef); @@ -299,6 +286,7 @@ static std::tuple> resolveFlake(EvalState & st for (auto & require : flake.requires) todo.push({require, false}); +#if 0 // The following piece of code basically adds the FlakeRefs from // the lockfiles of dependencies to the localRegistry. This is used // to resolve future `FlakeId`s, in `lookupFlake` a bit above this. @@ -307,6 +295,7 @@ static std::tuple> resolveFlake(EvalState & st if (localRegistry->entries.count(entry.first)) continue; localRegistry->entries.emplace(entry.first, entry.second); } +#endif done.emplace(flake.id, std::move(flake)); } @@ -344,19 +333,9 @@ void updateLockFile(EvalState & state, std::string path) } } -Value * makeFlakeValue(EvalState & state, std::string flakeUri, Value & v) +Value * makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, bool impureTopRef, Value & v) { - // FIXME: temporary hack to make the default installation source - // work. - bool impure = false; - if (hasPrefix(flakeUri, "impure:")) { - flakeUri = std::string(flakeUri, 7); - impure = true; - } - - auto flakeRef = FlakeRef(flakeUri); - - auto [topFlakeId, flakes] = resolveFlake(state, flakeUri, impure); + auto [topFlakeId, flakes] = resolveFlake(state, flakeRef, impureTopRef); // FIXME: we should call each flake with only its dependencies // (rather than the closure of the top-level flake). @@ -387,7 +366,7 @@ Value * makeFlakeValue(EvalState & state, std::string flakeUri, Value & v) static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) { - makeFlakeValue(state, state.forceStringNoCtx(*args[0], pos), v); + makeFlakeValue(state, state.forceStringNoCtx(*args[0], pos), false, v); } static RegisterPrimOp r2("getFlake", 1, prim_getFlake); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 53cea1cc2..df8cf9efb 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -23,7 +23,7 @@ Path getUserRegistryPath(); Value * makeFlakeRegistryValue(EvalState & state); -Value * makeFlakeValue(EvalState & state, std::string flakeUri, Value & v); +Value * makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, bool impureTopRef, Value & v); std::shared_ptr readRegistry(const Path &); diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 0453c72c2..5e4ea6054 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -21,13 +21,6 @@ SourceExprCommand::SourceExprCommand() .label("file") .description("evaluate FILE rather than use the default installation source") .dest(&file); - - mkFlag() - .shortName('F') - .longName("flake") - .label("flake") - .description("evaluate FLAKE rather than use the default installation source") - .dest(&flakeUri); } Value * SourceExprCommand::getSourceExpr(EvalState & state) @@ -36,17 +29,9 @@ Value * SourceExprCommand::getSourceExpr(EvalState & state) vSourceExpr = state.allocValue(); - if (file && flakeUri) - throw Error("cannot use both --file and --flake"); - if (file) state.evalFile(lookupFileArg(state, *file), *vSourceExpr); - else if (flakeUri) { - // FIXME: handle flakeUri being a relative path - auto vTemp = state.allocValue(); - auto vFlake = *makeFlakeValue(state, "impure:" + *flakeUri, *vTemp); - *vSourceExpr = 
*((*vFlake.attrs->get(state.symbols.create("provides")))->value); - } else { + else { // FIXME: remove "impure" hack, call some non-user-accessible // variant of getFlake instead. auto fun = state.parseExprFromString( @@ -176,6 +161,43 @@ struct InstallableAttrPath : InstallableValue } }; +struct InstallableFlake : InstallableValue +{ + FlakeRef flakeRef; + std::string attrPath; + + InstallableFlake(SourceExprCommand & cmd, FlakeRef && flakeRef, const std::string & attrPath) + : InstallableValue(cmd), flakeRef(flakeRef), attrPath(attrPath) + { } + + std::string what() override { return flakeRef.to_string() + ":" + attrPath; } + + Value * toValue(EvalState & state) override + { + auto vTemp = state.allocValue(); + auto vFlake = *makeFlakeValue(state, flakeRef, true, *vTemp); + + auto vProvides = (*vFlake.attrs->get(state.symbols.create("provides")))->value; + + state.forceValue(*vProvides); + + auto emptyArgs = state.allocBindings(0); + + if (auto aPackages = *vProvides->attrs->get(state.symbols.create("packages"))) { + try { + auto * v = findAlongAttrPath(state, attrPath, *emptyArgs, *aPackages->value); + state.forceValue(*v); + return v; + } catch (AttrPathNotFound & e) { + } + } + + auto * v = findAlongAttrPath(state, attrPath, *emptyArgs, *vProvides); + state.forceValue(*v); + return v; + } +}; + // FIXME: extend std::string attrRegex = R"([A-Za-z_][A-Za-z0-9-_+]*)"; static std::regex attrPathRegex(fmt(R"(%1%(\.%1%)*)", attrRegex)); @@ -196,19 +218,34 @@ static std::vector> parseInstallables( if (s.compare(0, 1, "(") == 0) result.push_back(std::make_shared(cmd, s)); - else if (s.find("/") != std::string::npos) { + /* + else if (s.find('/') != std::string::npos) { auto path = store->toStorePath(store->followLinksToStore(s)); if (store->isStorePath(path)) result.push_back(std::make_shared(path)); } + */ + else { + auto colon = s.rfind(':'); + if (colon != std::string::npos) { + auto flakeRef = std::string(s, 0, colon); + auto attrPath = std::string(s, colon + 1); + result.push_back(std::make_shared(cmd, FlakeRef(flakeRef), attrPath)); + } else { + result.push_back(std::make_shared(cmd, FlakeRef("nixpkgs"), s)); + } + } + + /* else if (s == "" || std::regex_match(s, attrPathRegex)) result.push_back(std::make_shared(cmd, s)); else throw UsageError("don't know what to do with argument '%s'", s); + */ } return result; From 101d964a59d5c9098845d3109ea1eba99b5f31df Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 8 Apr 2019 16:11:17 +0200 Subject: [PATCH 031/634] nix: Make -f work for compatibility --- src/nix/command.hh | 23 ++------ src/nix/installables.cc | 121 +++++++++++++++++----------------------- src/nix/search.cc | 4 +- src/nix/why-depends.cc | 4 +- 4 files changed, 61 insertions(+), 91 deletions(-) diff --git a/src/nix/command.hh b/src/nix/command.hh index ffe64ccb7..83959bf9a 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -74,23 +74,20 @@ struct Installable struct SourceExprCommand : virtual Args, StoreCommand, MixEvalArgs { std::optional file; - std::optional flakeUri; SourceExprCommand(); - /* Return a value representing the Nix expression from which we - are installing. This is either the file specified by ‘--file’, - or an attribute set constructed from $NIX_PATH, e.g. ‘{ nixpkgs - = import ...; bla = import ...; }’. 
*/ - Value * getSourceExpr(EvalState & state); - ref getEvalState(); + std::vector> parseInstallables( + ref store, std::vector ss); + + std::shared_ptr parseInstallable( + ref store, const std::string & installable); + private: std::shared_ptr evalState; - - Value * vSourceExpr = 0; }; enum RealiseMode { Build, NoBuild, DryRun }; @@ -108,8 +105,6 @@ struct InstallablesCommand : virtual Args, SourceExprCommand void prepare() override; - virtual bool useDefaultInstallables() { return true; } - private: std::vector _installables; @@ -148,8 +143,6 @@ public: virtual void run(ref store, Paths storePaths) = 0; void run(ref store) override; - - bool useDefaultInstallables() override { return !all; } }; /* A command that operates on exactly one store path. */ @@ -174,10 +167,6 @@ struct RegisterCommand } }; -std::shared_ptr parseInstallable( - SourceExprCommand & cmd, ref store, const std::string & installable, - bool useDefaultInstallables); - Buildables build(ref store, RealiseMode mode, std::vector> installables); diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 5e4ea6054..bcb22349a 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -19,33 +19,10 @@ SourceExprCommand::SourceExprCommand() .shortName('f') .longName("file") .label("file") - .description("evaluate FILE rather than use the default installation source") + .description("evaluate a set of attributes from FILE (deprecated)") .dest(&file); } -Value * SourceExprCommand::getSourceExpr(EvalState & state) -{ - if (vSourceExpr) return vSourceExpr; - - vSourceExpr = state.allocValue(); - - if (file) - state.evalFile(lookupFileArg(state, *file), *vSourceExpr); - else { - // FIXME: remove "impure" hack, call some non-user-accessible - // variant of getFlake instead. 
- auto fun = state.parseExprFromString( - "builtins.mapAttrs (flakeName: flakeInfo:" - " (getFlake (\"impure:\" + flakeInfo.uri)).${flakeName}.provides.packages or {})", "/"); - auto vFun = state.allocValue(); - state.eval(fun, *vFun); - auto vRegistry = makeFlakeRegistryValue(state); - mkApp(*vSourceExpr, *vFun, *vRegistry); - } - - return vSourceExpr; -} - ref SourceExprCommand::getEvalState() { if (!evalState) @@ -140,24 +117,20 @@ struct InstallableExpr : InstallableValue struct InstallableAttrPath : InstallableValue { + Value * v; std::string attrPath; - InstallableAttrPath(SourceExprCommand & cmd, const std::string & attrPath) - : InstallableValue(cmd), attrPath(attrPath) + InstallableAttrPath(SourceExprCommand & cmd, Value * v, const std::string & attrPath) + : InstallableValue(cmd), v(v), attrPath(attrPath) { } std::string what() override { return attrPath; } Value * toValue(EvalState & state) override { - auto source = cmd.getSourceExpr(state); - - Bindings & autoArgs = *cmd.getAutoArgs(state); - - Value * v = findAlongAttrPath(state, attrPath, autoArgs, *source); - state.forceValue(*v); - - return v; + auto vRes = findAlongAttrPath(state, attrPath, *cmd.getAutoArgs(state), *v); + state.forceValue(*vRes); + return vRes; } }; @@ -202,60 +175,66 @@ struct InstallableFlake : InstallableValue std::string attrRegex = R"([A-Za-z_][A-Za-z0-9-_+]*)"; static std::regex attrPathRegex(fmt(R"(%1%(\.%1%)*)", attrRegex)); -static std::vector> parseInstallables( - SourceExprCommand & cmd, ref store, std::vector ss, bool useDefaultInstallables) +std::vector> SourceExprCommand::parseInstallables( + ref store, std::vector ss) { std::vector> result; - if (ss.empty() && useDefaultInstallables) { - if (cmd.file == "") - cmd.file = "."; - ss = {""}; - } + if (file) { + // FIXME: backward compatibility hack + evalSettings.pureEval = false; - for (auto & s : ss) { + auto state = getEvalState(); + auto vFile = state->allocValue(); + state->evalFile(lookupFileArg(*state, *file), *vFile); - if (s.compare(0, 1, "(") == 0) - result.push_back(std::make_shared(cmd, s)); + if (ss.empty()) + ss = {""}; - /* - else if (s.find('/') != std::string::npos) { + for (auto & s : ss) + result.push_back(std::make_shared(*this, vFile, s)); - auto path = store->toStorePath(store->followLinksToStore(s)); + } else { - if (store->isStorePath(path)) - result.push_back(std::make_shared(path)); - } - */ + for (auto & s : ss) { - else { - auto colon = s.rfind(':'); - if (colon != std::string::npos) { + size_t colon; + + if (s.compare(0, 1, "(") == 0) + result.push_back(std::make_shared(*this, s)); + + else if ((colon = s.rfind(':')) != std::string::npos) { auto flakeRef = std::string(s, 0, colon); auto attrPath = std::string(s, colon + 1); - result.push_back(std::make_shared(cmd, FlakeRef(flakeRef), attrPath)); - } else { - result.push_back(std::make_shared(cmd, FlakeRef("nixpkgs"), s)); + result.push_back(std::make_shared(*this, FlakeRef(flakeRef), attrPath)); } + + else if (s.find('/') != std::string::npos) { + auto path = store->toStorePath(store->followLinksToStore(s)); + result.push_back(std::make_shared(path)); + } + + else { + result.push_back(std::make_shared(*this, FlakeRef("nixpkgs"), s)); + } + + /* + else if (s == "" || std::regex_match(s, attrPathRegex)) + result.push_back(std::make_shared(cmd, s)); + + else + throw UsageError("don't know what to do with argument '%s'", s); + */ } - - /* - else if (s == "" || std::regex_match(s, attrPathRegex)) - result.push_back(std::make_shared(cmd, s)); - - else - throw 
UsageError("don't know what to do with argument '%s'", s); - */ } return result; } -std::shared_ptr parseInstallable( - SourceExprCommand & cmd, ref store, const std::string & installable, - bool useDefaultInstallables) +std::shared_ptr SourceExprCommand::parseInstallable( + ref store, const std::string & installable) { - auto installables = parseInstallables(cmd, store, {installable}, false); + auto installables = parseInstallables(store, {installable}); assert(installables.size() == 1); return installables.front(); } @@ -342,12 +321,12 @@ PathSet toDerivations(ref store, void InstallablesCommand::prepare() { - installables = parseInstallables(*this, getStore(), _installables, useDefaultInstallables()); + installables = parseInstallables(getStore(), _installables); } void InstallableCommand::prepare() { - installable = parseInstallable(*this, getStore(), _installable, false); + installable = parseInstallable(getStore(), _installable); } } diff --git a/src/nix/search.cc b/src/nix/search.cc index e086de226..55f8d106a 100644 --- a/src/nix/search.cc +++ b/src/nix/search.cc @@ -257,7 +257,9 @@ struct CmdSearch : SourceExprCommand, MixJSON auto cache = writeCache ? std::make_unique(jsonCacheFile, false) : nullptr; - doExpr(getSourceExpr(*state), "", true, cache.get()); + // FIXME + throw Error("NOT IMPLEMENTED"); + //doExpr(getSourceExpr(*state), "", true, cache.get()); } catch (std::exception &) { /* Fun fact: catching std::ios::failure does not work diff --git a/src/nix/why-depends.cc b/src/nix/why-depends.cc index 325a2be0a..32ba5a1ad 100644 --- a/src/nix/why-depends.cc +++ b/src/nix/why-depends.cc @@ -74,9 +74,9 @@ struct CmdWhyDepends : SourceExprCommand void run(ref store) override { - auto package = parseInstallable(*this, store, _package, false); + auto package = parseInstallable(store, _package); auto packagePath = toStorePath(store, Build, package); - auto dependency = parseInstallable(*this, store, _dependency, false); + auto dependency = parseInstallable(store, _dependency); auto dependencyPath = toStorePath(store, NoBuild, dependency); auto dependencyPathHash = storePathToHash(dependencyPath); From d2875f678270b4c241055765ec65d9ddb66bd60f Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 8 Apr 2019 16:11:36 +0200 Subject: [PATCH 032/634] Fix tests --- tests/fetchGit.sh | 42 ++++++++++++++++++++--------------------- tests/fetchMercurial.sh | 28 +++++++++++++-------------- tests/pure-eval.sh | 16 ++++++++-------- tests/restricted.sh | 16 ++++++++-------- tests/search.sh | 2 ++ 5 files changed, 53 insertions(+), 51 deletions(-) diff --git a/tests/fetchGit.sh b/tests/fetchGit.sh index 4c46bdf04..301bf3022 100644 --- a/tests/fetchGit.sh +++ b/tests/fetchGit.sh @@ -26,30 +26,30 @@ git -C $repo commit -m 'Bla2' -a rev2=$(git -C $repo rev-parse HEAD) # Fetch the default branch. -path=$(nix eval --raw "(builtins.fetchGit file://$repo).outPath") +path=$(nix eval --no-pure-eval --raw "(builtins.fetchGit file://$repo).outPath") [[ $(cat $path/hello) = world ]] # In pure eval mode, fetchGit without a revision should fail. -[[ $(nix eval --raw "(builtins.readFile (fetchGit file://$repo + \"/hello\"))") = world ]] -(! nix eval --pure-eval --raw "(builtins.readFile (fetchGit file://$repo + \"/hello\"))") +[[ $(nix eval --no-pure-eval --raw "(builtins.readFile (fetchGit file://$repo + \"/hello\"))") = world ]] +(! nix eval --raw "(builtins.readFile (fetchGit file://$repo + \"/hello\"))") # Fetch using an explicit revision hash. 
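+# (no --no-pure-eval needed below: fetching by an explicit rev is allowed in pure mode)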
path2=$(nix eval --raw "(builtins.fetchGit { url = file://$repo; rev = \"$rev2\"; }).outPath") [[ $path = $path2 ]] # In pure eval mode, fetchGit with a revision should succeed. -[[ $(nix eval --pure-eval --raw "(builtins.readFile (fetchGit { url = file://$repo; rev = \"$rev2\"; } + \"/hello\"))") = world ]] +[[ $(nix eval --raw "(builtins.readFile (fetchGit { url = file://$repo; rev = \"$rev2\"; } + \"/hello\"))") = world ]] # Fetch again. This should be cached. mv $repo ${repo}-tmp -path2=$(nix eval --raw "(builtins.fetchGit file://$repo).outPath") +path2=$(nix eval --no-pure-eval --raw "(builtins.fetchGit file://$repo).outPath") [[ $path = $path2 ]] -[[ $(nix eval "(builtins.fetchGit file://$repo).revCount") = 2 ]] -[[ $(nix eval --raw "(builtins.fetchGit file://$repo).rev") = $rev2 ]] +[[ $(nix eval --no-pure-eval "(builtins.fetchGit file://$repo).revCount") = 2 ]] +[[ $(nix eval --no-pure-eval --raw "(builtins.fetchGit file://$repo).rev") = $rev2 ]] # But with TTL 0, it should fail. -(! nix eval --tarball-ttl 0 "(builtins.fetchGit file://$repo)" -vvvvv) +(! nix eval --no-pure-eval --tarball-ttl 0 "(builtins.fetchGit file://$repo)" -vvvvv) # Fetching with a explicit hash should succeed. path2=$(nix eval --tarball-ttl 0 --raw "(builtins.fetchGit { url = file://$repo; rev = \"$rev2\"; }).outPath") @@ -61,7 +61,7 @@ path2=$(nix eval --tarball-ttl 0 --raw "(builtins.fetchGit { url = file://$repo; mv ${repo}-tmp $repo # Using a clean working tree should produce the same result. -path2=$(nix eval --raw "(builtins.fetchGit $repo).outPath") +path2=$(nix eval --no-pure-eval --raw "(builtins.fetchGit $repo).outPath") [[ $path = $path2 ]] # Using an unclean tree should yield the tracked but uncommitted changes. @@ -72,17 +72,17 @@ echo bar > $repo/dir2/bar git -C $repo add dir1/foo git -C $repo rm hello -path2=$(nix eval --raw "(builtins.fetchGit $repo).outPath") +path2=$(nix eval --no-pure-eval --raw "(builtins.fetchGit $repo).outPath") [ ! -e $path2/hello ] [ ! -e $path2/bar ] [ ! -e $path2/dir2/bar ] [ ! -e $path2/.git ] [[ $(cat $path2/dir1/foo) = foo ]] -[[ $(nix eval --raw "(builtins.fetchGit $repo).rev") = 0000000000000000000000000000000000000000 ]] +[[ $(nix eval --no-pure-eval --raw "(builtins.fetchGit $repo).rev") = 0000000000000000000000000000000000000000 ]] # ... unless we're using an explicit ref or rev. -path3=$(nix eval --raw "(builtins.fetchGit { url = $repo; ref = \"master\"; }).outPath") +path3=$(nix eval --no-pure-eval --raw "(builtins.fetchGit { url = $repo; ref = \"master\"; }).outPath") [[ $path = $path3 ]] path3=$(nix eval --raw "(builtins.fetchGit { url = $repo; rev = \"$rev2\"; }).outPath") @@ -91,7 +91,7 @@ path3=$(nix eval --raw "(builtins.fetchGit { url = $repo; rev = \"$rev2\"; }).ou # Committing should not affect the store path. 
git -C $repo commit -m 'Bla3' -a -path4=$(nix eval --tarball-ttl 0 --raw "(builtins.fetchGit file://$repo).outPath") +path4=$(nix eval --no-pure-eval --tarball-ttl 0 --raw "(builtins.fetchGit file://$repo).outPath") [[ $path2 = $path4 ]] # tarball-ttl should be ignored if we specify a rev @@ -102,29 +102,29 @@ rev3=$(git -C $repo rev-parse HEAD) nix eval --tarball-ttl 3600 "(builtins.fetchGit { url = $repo; rev = \"$rev3\"; })" >/dev/null # Update 'path' to reflect latest master -path=$(nix eval --raw "(builtins.fetchGit file://$repo).outPath") +path=$(nix eval --no-pure-eval --raw "(builtins.fetchGit file://$repo).outPath") # Check behavior when non-master branch is used git -C $repo checkout $rev2 -b dev echo dev > $repo/hello # File URI uses 'master' unless specified otherwise -path2=$(nix eval --raw "(builtins.fetchGit file://$repo).outPath") +path2=$(nix eval --no-pure-eval --raw "(builtins.fetchGit file://$repo).outPath") [[ $path = $path2 ]] # Using local path with branch other than 'master' should work when clean or dirty -path3=$(nix eval --raw "(builtins.fetchGit $repo).outPath") +path3=$(nix eval --no-pure-eval --raw "(builtins.fetchGit $repo).outPath") # (check dirty-tree handling was used) -[[ $(nix eval --raw "(builtins.fetchGit $repo).rev") = 0000000000000000000000000000000000000000 ]] +[[ $(nix eval --no-pure-eval --raw "(builtins.fetchGit $repo).rev") = 0000000000000000000000000000000000000000 ]] # Committing shouldn't change store path, or switch to using 'master' git -C $repo commit -m 'Bla5' -a -path4=$(nix eval --raw "(builtins.fetchGit $repo).outPath") +path4=$(nix eval --no-pure-eval --raw "(builtins.fetchGit $repo).outPath") [[ $(cat $path4/hello) = dev ]] [[ $path3 = $path4 ]] # Confirm same as 'dev' branch -path5=$(nix eval --raw "(builtins.fetchGit { url = $repo; ref = \"dev\"; }).outPath") +path5=$(nix eval --no-pure-eval --raw "(builtins.fetchGit { url = $repo; ref = \"dev\"; }).outPath") [[ $path3 = $path5 ]] @@ -134,8 +134,8 @@ rm -rf $TEST_HOME/.cache/nix/gitv2 # Try again, but without 'git' on PATH NIX=$(command -v nix) # This should fail -(! PATH= $NIX eval --raw "(builtins.fetchGit { url = $repo; ref = \"dev\"; }).outPath" ) +(! PATH= $NIX eval --no-pure-eval --raw "(builtins.fetchGit { url = $repo; ref = \"dev\"; }).outPath" ) # Try again, with 'git' available. This should work. -path5=$(nix eval --raw "(builtins.fetchGit { url = $repo; ref = \"dev\"; }).outPath") +path5=$(nix eval --no-pure-eval --raw "(builtins.fetchGit { url = $repo; ref = \"dev\"; }).outPath") [[ $path3 = $path5 ]] diff --git a/tests/fetchMercurial.sh b/tests/fetchMercurial.sh index 4088dbd39..d0735a381 100644 --- a/tests/fetchMercurial.sh +++ b/tests/fetchMercurial.sh @@ -26,31 +26,31 @@ hg commit --cwd $repo -m 'Bla2' rev2=$(hg log --cwd $repo -r tip --template '{node}') # Fetch the default branch. -path=$(nix eval --raw "(builtins.fetchMercurial file://$repo).outPath") +path=$(nix eval --no-pure-eval --raw "(builtins.fetchMercurial file://$repo).outPath") [[ $(cat $path/hello) = world ]] # In pure eval mode, fetchGit without a revision should fail. -[[ $(nix eval --raw "(builtins.readFile (fetchMercurial file://$repo + \"/hello\"))") = world ]] -(! nix eval --pure-eval --raw "(builtins.readFile (fetchMercurial file://$repo + \"/hello\"))") +[[ $(nix eval --no-pure-eval --raw "(builtins.readFile (fetchMercurial file://$repo + \"/hello\"))") = world ]] +(! nix eval --raw "(builtins.readFile (fetchMercurial file://$repo + \"/hello\"))") # Fetch using an explicit revision hash. 
-path2=$(nix eval --raw "(builtins.fetchMercurial { url = file://$repo; rev = \"$rev2\"; }).outPath") +path2=$(nix eval --no-pure-eval --raw "(builtins.fetchMercurial { url = file://$repo; rev = \"$rev2\"; }).outPath") [[ $path = $path2 ]] # In pure eval mode, fetchGit with a revision should succeed. -[[ $(nix eval --pure-eval --raw "(builtins.readFile (fetchMercurial { url = file://$repo; rev = \"$rev2\"; } + \"/hello\"))") = world ]] +[[ $(nix eval --raw "(builtins.readFile (fetchMercurial { url = file://$repo; rev = \"$rev2\"; } + \"/hello\"))") = world ]] # Fetch again. This should be cached. mv $repo ${repo}-tmp -path2=$(nix eval --raw "(builtins.fetchMercurial file://$repo).outPath") +path2=$(nix eval --no-pure-eval --raw "(builtins.fetchMercurial file://$repo).outPath") [[ $path = $path2 ]] -[[ $(nix eval --raw "(builtins.fetchMercurial file://$repo).branch") = default ]] -[[ $(nix eval "(builtins.fetchMercurial file://$repo).revCount") = 1 ]] -[[ $(nix eval --raw "(builtins.fetchMercurial file://$repo).rev") = $rev2 ]] +[[ $(nix eval --no-pure-eval --raw "(builtins.fetchMercurial file://$repo).branch") = default ]] +[[ $(nix eval --no-pure-eval "(builtins.fetchMercurial file://$repo).revCount") = 1 ]] +[[ $(nix eval --no-pure-eval --raw "(builtins.fetchMercurial file://$repo).rev") = $rev2 ]] # But with TTL 0, it should fail. -(! nix eval --tarball-ttl 0 "(builtins.fetchMercurial file://$repo)") +(! nix eval --no-pure-eval --tarball-ttl 0 "(builtins.fetchMercurial file://$repo)") # Fetching with a explicit hash should succeed. path2=$(nix eval --tarball-ttl 0 --raw "(builtins.fetchMercurial { url = file://$repo; rev = \"$rev2\"; }).outPath") @@ -62,7 +62,7 @@ path2=$(nix eval --tarball-ttl 0 --raw "(builtins.fetchMercurial { url = file:// mv ${repo}-tmp $repo # Using a clean working tree should produce the same result. -path2=$(nix eval --raw "(builtins.fetchMercurial $repo).outPath") +path2=$(nix eval --no-pure-eval --raw "(builtins.fetchMercurial $repo).outPath") [[ $path = $path2 ]] # Using an unclean tree should yield the tracked but uncommitted changes. @@ -73,14 +73,14 @@ echo bar > $repo/dir2/bar hg add --cwd $repo dir1/foo hg rm --cwd $repo hello -path2=$(nix eval --raw "(builtins.fetchMercurial $repo).outPath") +path2=$(nix eval --no-pure-eval --raw "(builtins.fetchMercurial $repo).outPath") [ ! -e $path2/hello ] [ ! -e $path2/bar ] [ ! -e $path2/dir2/bar ] [ ! -e $path2/.hg ] [[ $(cat $path2/dir1/foo) = foo ]] -[[ $(nix eval --raw "(builtins.fetchMercurial $repo).rev") = 0000000000000000000000000000000000000000 ]] +[[ $(nix eval --no-pure-eval --raw "(builtins.fetchMercurial $repo).rev") = 0000000000000000000000000000000000000000 ]] # ... unless we're using an explicit rev. path3=$(nix eval --raw "(builtins.fetchMercurial { url = $repo; rev = \"default\"; }).outPath") @@ -89,5 +89,5 @@ path3=$(nix eval --raw "(builtins.fetchMercurial { url = $repo; rev = \"default\ # Committing should not affect the store path. 
hg commit --cwd $repo -m 'Bla3' -path4=$(nix eval --tarball-ttl 0 --raw "(builtins.fetchMercurial file://$repo).outPath") +path4=$(nix eval --no-pure-eval --tarball-ttl 0 --raw "(builtins.fetchMercurial file://$repo).outPath") [[ $path2 = $path4 ]] diff --git a/tests/pure-eval.sh b/tests/pure-eval.sh index 49c856448..307942940 100644 --- a/tests/pure-eval.sh +++ b/tests/pure-eval.sh @@ -2,17 +2,17 @@ source common.sh clearStore -nix eval --pure-eval '(assert 1 + 2 == 3; true)' +nix eval '(assert 1 + 2 == 3; true)' -[[ $(nix eval '(builtins.readFile ./pure-eval.sh)') =~ clearStore ]] +[[ $(nix eval --no-pure-eval '(builtins.readFile ./pure-eval.sh)') =~ clearStore ]] -(! nix eval --pure-eval '(builtins.readFile ./pure-eval.sh)') +(! nix eval '(builtins.readFile ./pure-eval.sh)') -(! nix eval --pure-eval '(builtins.currentTime)') -(! nix eval --pure-eval '(builtins.currentSystem)') +(! nix eval '(builtins.currentTime)') +(! nix eval '(builtins.currentSystem)') (! nix-instantiate --pure-eval ./simple.nix) -[[ $(nix eval "((import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; })).x)") == 123 ]] -(! nix eval --pure-eval "((import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; })).x)") -nix eval --pure-eval "((import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; sha256 = \"$(nix hash-file pure-eval.nix --type sha256)\"; })).x)" +[[ $(nix eval --no-pure-eval "((import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; })).x)") == 123 ]] +(! nix eval "((import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; })).x)") +nix eval "((import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; sha256 = \"$(nix hash-file pure-eval.nix --type sha256)\"; })).x)" diff --git a/tests/restricted.sh b/tests/restricted.sh index e02becc60..68913cd36 100644 --- a/tests/restricted.sh +++ b/tests/restricted.sh @@ -17,18 +17,18 @@ nix-instantiate --restrict-eval --eval -E 'builtins.readDir ../src/nix-channel' (! nix-instantiate --restrict-eval --eval -E 'let __nixPath = [ { prefix = "foo"; path = ./.; } ]; in ') nix-instantiate --restrict-eval --eval -E 'let __nixPath = [ { prefix = "foo"; path = ./.; } ]; in ' -I src=. -p=$(nix eval --raw "(builtins.fetchurl file://$(pwd)/restricted.sh)" --restrict-eval --allowed-uris "file://$(pwd)") +p=$(nix eval --raw "(builtins.fetchurl file://$(pwd)/restricted.sh)" --no-pure-eval --restrict-eval --allowed-uris "file://$(pwd)") cmp $p restricted.sh -(! nix eval --raw "(builtins.fetchurl file://$(pwd)/restricted.sh)" --restrict-eval) +(! nix eval --raw "(builtins.fetchurl file://$(pwd)/restricted.sh)" --no-pure-eval --restrict-eval) -(! nix eval --raw "(builtins.fetchurl file://$(pwd)/restricted.sh)" --restrict-eval --allowed-uris "file://$(pwd)/restricted.sh/") +(! nix eval --raw "(builtins.fetchurl file://$(pwd)/restricted.sh)" --no-pure-eval --restrict-eval --allowed-uris "file://$(pwd)/restricted.sh/") -nix eval --raw "(builtins.fetchurl file://$(pwd)/restricted.sh)" --restrict-eval --allowed-uris "file://$(pwd)/restricted.sh" +nix eval --raw "(builtins.fetchurl file://$(pwd)/restricted.sh)" --no-pure-eval --restrict-eval --allowed-uris "file://$(pwd)/restricted.sh" -(! nix eval --raw "(builtins.fetchurl https://github.com/NixOS/patchelf/archive/master.tar.gz)" --restrict-eval) -(! nix eval --raw "(builtins.fetchTarball https://github.com/NixOS/patchelf/archive/master.tar.gz)" --restrict-eval) -(! nix eval --raw "(fetchGit git://github.com/NixOS/patchelf.git)" --restrict-eval) +(! 
nix eval --raw "(builtins.fetchurl https://github.com/NixOS/patchelf/archive/master.tar.gz)" --no-pure-eval --restrict-eval) +(! nix eval --raw "(builtins.fetchTarball https://github.com/NixOS/patchelf/archive/master.tar.gz)" --no-pure-eval --restrict-eval) +(! nix eval --raw "(fetchGit git://github.com/NixOS/patchelf.git)" --no-pure-eval --restrict-eval) ln -sfn $(pwd)/restricted.nix $TEST_ROOT/restricted.nix [[ $(nix-instantiate --eval $TEST_ROOT/restricted.nix) == 3 ]] @@ -37,7 +37,7 @@ ln -sfn $(pwd)/restricted.nix $TEST_ROOT/restricted.nix (! nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix -I .) nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix -I $TEST_ROOT -I . -[[ $(nix eval --raw --restrict-eval -I . '(builtins.readFile "${import ./simple.nix}/hello")') == 'Hello World!' ]] +[[ $(nix eval --raw --no-pure-eval --restrict-eval -I . '(builtins.readFile "${import ./simple.nix}/hello")') == 'Hello World!' ]] # Check whether we can leak symlink information through directory traversal. traverseDir="$(pwd)/restricted-traverse-me" diff --git a/tests/search.sh b/tests/search.sh index 14da3127b..6c4d791c1 100644 --- a/tests/search.sh +++ b/tests/search.sh @@ -3,6 +3,8 @@ source common.sh clearStore clearCache +exit 0 # FIXME + # No packages (( $(NIX_PATH= nix search -u|wc -l) == 0 )) From 4023ae4cdf146b2ee491c12ec64e8605984bf49a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 8 Apr 2019 16:22:04 +0200 Subject: [PATCH 033/634] nix: Support nixpkgs. for compatibility --- src/nix/installables.cc | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/src/nix/installables.cc b/src/nix/installables.cc index bcb22349a..6d3969e95 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -203,6 +203,12 @@ std::vector> SourceExprCommand::parseInstallables( if (s.compare(0, 1, "(") == 0) result.push_back(std::make_shared(*this, s)); + else if (hasPrefix(s, "nixpkgs.")) { + bool static warned; + warnOnce(warned, "the syntax 'nixpkgs.' is deprecated; use 'nixpkgs:' instead"); + result.push_back(std::make_shared(*this, FlakeRef("nixpkgs"), std::string(s, 8))); + } + else if ((colon = s.rfind(':')) != std::string::npos) { auto flakeRef = std::string(s, 0, colon); auto attrPath = std::string(s, colon + 1); @@ -214,17 +220,8 @@ std::vector> SourceExprCommand::parseInstallables( result.push_back(std::make_shared(path)); } - else { - result.push_back(std::make_shared(*this, FlakeRef("nixpkgs"), s)); - } - - /* - else if (s == "" || std::regex_match(s, attrPathRegex)) - result.push_back(std::make_shared(cmd, s)); - else - throw UsageError("don't know what to do with argument '%s'", s); - */ + result.push_back(std::make_shared(*this, FlakeRef("nixpkgs"), s)); } } From a9ceeeb4b0caf6891c8cd8fcbe744d3d567c1d8e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 8 Apr 2019 17:28:05 +0200 Subject: [PATCH 034/634] Add a flake.nix --- flake.nix | 18 ++++++++++++++++++ release.nix | 2 +- 2 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 flake.nix diff --git a/flake.nix b/flake.nix new file mode 100644 index 000000000..b119f0324 --- /dev/null +++ b/flake.nix @@ -0,0 +1,18 @@ +{ + name = "nix"; + + description = "The purely functional package manager"; + + requires = [ flake:nixpkgs ]; + + provides = flakes: rec { + + hydraJobs = import ./release.nix { + nix = flakes.nix; # => flakes.self? 
+ nixpkgs = flakes.nixpkgs; + }; + + packages.nix = hydraJobs.build.x86_64-linux; + + }; +} diff --git a/release.nix b/release.nix index ab13451ff..f52120474 100644 --- a/release.nix +++ b/release.nix @@ -19,7 +19,7 @@ let releaseTools.sourceTarball { name = "nix-tarball"; version = builtins.readFile ./.version; - versionSuffix = if officialRelease then "" else "pre${toString nix.revCount}_${nix.shortRev}"; + versionSuffix = if officialRelease then "" else "pre${toString nix.revCount or 0}_${nix.shortRev or "0000000"}"; src = nix; inherit officialRelease; From 6a4c7fb9759dbbf5ddaf0ebd00921d0f8045f355 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 8 Apr 2019 22:46:25 +0200 Subject: [PATCH 035/634] Add path flakeref variant Unlike file://, this allows the path to be a dirty Git tree, so nix build /path/to/flake:attr is a convenient way to test building a local flake. --- src/libexpr/primops/fetchGit.cc | 4 ++-- src/libexpr/primops/fetchGit.hh | 2 +- src/libexpr/primops/flake.cc | 27 ++++++++++++++++++++++++++- src/libexpr/primops/flake.hh | 1 + src/libexpr/primops/flakeref.cc | 13 +++++++++++++ src/libexpr/primops/flakeref.hh | 7 ++++++- 6 files changed, 49 insertions(+), 5 deletions(-) diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index bbf13c87b..391308224 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -170,7 +170,7 @@ GitInfo exportGit(ref store, const std::string & uri, json["uri"] = uri; json["name"] = name; json["rev"] = gitInfo.rev; - json["revCount"] = gitInfo.revCount; + json["revCount"] = *gitInfo.revCount; writeFile(storeLink, json.dump()); @@ -224,7 +224,7 @@ static void prim_fetchGit(EvalState & state, const Pos & pos, Value * * args, Va mkString(*state.allocAttr(v, state.sOutPath), gitInfo.storePath, PathSet({gitInfo.storePath})); mkString(*state.allocAttr(v, state.symbols.create("rev")), gitInfo.rev); mkString(*state.allocAttr(v, state.symbols.create("shortRev")), gitInfo.shortRev); - mkInt(*state.allocAttr(v, state.symbols.create("revCount")), gitInfo.revCount); + mkInt(*state.allocAttr(v, state.symbols.create("revCount")), gitInfo.revCount.value_or(0)); v.attrs->sort(); if (state.allowedPaths) diff --git a/src/libexpr/primops/fetchGit.hh b/src/libexpr/primops/fetchGit.hh index d7a0e165a..60c439426 100644 --- a/src/libexpr/primops/fetchGit.hh +++ b/src/libexpr/primops/fetchGit.hh @@ -11,7 +11,7 @@ struct GitInfo Path storePath; std::string rev; std::string shortRev; - uint64_t revCount = 0; + std::optional revCount; }; GitInfo exportGit(ref store, const std::string & uri, diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index dedd2f737..f068569a6 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -129,6 +129,7 @@ struct FlakeSourceInfo { Path storePath; std::optional rev; + std::optional revCount; }; static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef) @@ -178,6 +179,18 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef) FlakeSourceInfo info; info.storePath = gitInfo.storePath; info.rev = Hash(gitInfo.rev, htSHA1); + info.revCount = gitInfo.revCount; + return info; + } + + else if (auto refData = std::get_if(&directFlakeRef.data)) { + if (!pathExists(refData->path + "/.git")) + throw Error("flake '%s' does not reference a Git repository", refData->path); + auto gitInfo = exportGit(state.store, refData->path, {}, "", "source"); + FlakeSourceInfo info; + info.storePath = gitInfo.storePath; 
+ info.rev = Hash(gitInfo.rev, htSHA1); + info.revCount = gitInfo.revCount; return info; } @@ -206,6 +219,8 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) } Flake flake(newFlakeRef); + flake.path = flakePath; + flake.revCount = sourceInfo.revCount; Value vInfo; state.evalFile(flakePath + "/flake.nix", vInfo); // FIXME: symlink attack @@ -349,10 +364,20 @@ Value * makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, bool impure for (auto & flake : flakes) { auto vFlake = state.allocAttr(*vResult, flake.second.id); if (topFlakeId == flake.second.id) vTop = vFlake; - state.mkAttrs(*vFlake, 2); + + state.mkAttrs(*vFlake, 4); + mkString(*state.allocAttr(*vFlake, state.sDescription), flake.second.description); + + state.store->assertStorePath(flake.second.path); + mkString(*state.allocAttr(*vFlake, state.sOutPath), flake.second.path, {flake.second.path}); + + if (flake.second.revCount) + mkInt(*state.allocAttr(*vFlake, state.symbols.create("revCount")), *flake.second.revCount); + auto vProvides = state.allocAttr(*vFlake, state.symbols.create("provides")); mkApp(*vProvides, *flake.second.vProvides, *vResult); + vFlake->attrs->sort(); } diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index df8cf9efb..aea4e8aa2 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -35,6 +35,7 @@ struct Flake FlakeRef ref; std::string description; Path path; + std::optional revCount; std::vector requires; std::shared_ptr lockFile; Value * vProvides; // FIXME: gc diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index 8e7c1f8df..5f9a29260 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -106,6 +106,12 @@ FlakeRef::FlakeRef(const std::string & uri) data = d; } + else if (hasPrefix(uri, "/")) { + IsPath d; + d.path = canonPath(uri); + data = d; + } + else throw Error("'%s' is not a valid flake reference", uri); } @@ -135,6 +141,10 @@ std::string FlakeRef::to_string() const (refData->rev ? "&rev=" + refData->rev->to_string(Base16, false) : ""); } + else if (auto refData = std::get_if(&data)) { + return refData->path; + } + else abort(); } @@ -149,6 +159,9 @@ bool FlakeRef::isImmutable() const else if (auto refData = std::get_if(&data)) return (bool) refData->rev; + else if (std::get_if(&data)) + return false; + else abort(); } diff --git a/src/libexpr/primops/flakeref.hh b/src/libexpr/primops/flakeref.hh index fb365e101..832d7dd03 100644 --- a/src/libexpr/primops/flakeref.hh +++ b/src/libexpr/primops/flakeref.hh @@ -122,9 +122,14 @@ struct FlakeRef std::optional rev; }; + struct IsPath + { + Path path; + }; + // Git, Tarball - std::variant data; + std::variant data; // Parse a flake URI. 
FlakeRef(const std::string & uri); From ee1254d4f50f5908fa4913253a643d14cb263c45 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 8 Apr 2019 23:19:19 +0200 Subject: [PATCH 036/634] nix: Add --impure as a shorter alias of --no-pure-eval --- src/libexpr/primops/flake.cc | 2 +- src/nix/installables.cc | 7 +++++++ tests/fetchGit.sh | 38 ++++++++++++++++++------------------ tests/fetchMercurial.sh | 24 +++++++++++------------ tests/pure-eval.sh | 4 ++-- tests/restricted.sh | 16 +++++++-------- 6 files changed, 49 insertions(+), 42 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index f068569a6..7cfb2038c 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -289,7 +289,7 @@ static std::tuple> resolveFlake(EvalState & st #if 0 if (evalSettings.pureEval && !flakeRef.isImmutable() && (!toplevel || !impureTopRef)) - throw Error("mutable flake '%s' is not allowed in pure mode; use --no-pure-eval to disable", flakeRef.to_string()); + throw Error("mutable flake '%s' is not allowed in pure mode; use --impure to disable", flakeRef.to_string()); #endif auto flake = getFlake(state, flakeRef); diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 6d3969e95..631a849cd 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -21,6 +21,13 @@ SourceExprCommand::SourceExprCommand() .label("file") .description("evaluate a set of attributes from FILE (deprecated)") .dest(&file); + + mkFlag() + .longName("impure") + .description("allow access to mutable paths and repositories") + .handler([&](std::vector ss) { + evalSettings.pureEval = false; + }); } ref SourceExprCommand::getEvalState() diff --git a/tests/fetchGit.sh b/tests/fetchGit.sh index 301bf3022..51fd49e9f 100644 --- a/tests/fetchGit.sh +++ b/tests/fetchGit.sh @@ -26,11 +26,11 @@ git -C $repo commit -m 'Bla2' -a rev2=$(git -C $repo rev-parse HEAD) # Fetch the default branch. -path=$(nix eval --no-pure-eval --raw "(builtins.fetchGit file://$repo).outPath") +path=$(nix eval --impure --raw "(builtins.fetchGit file://$repo).outPath") [[ $(cat $path/hello) = world ]] # In pure eval mode, fetchGit without a revision should fail. -[[ $(nix eval --no-pure-eval --raw "(builtins.readFile (fetchGit file://$repo + \"/hello\"))") = world ]] +[[ $(nix eval --impure --raw "(builtins.readFile (fetchGit file://$repo + \"/hello\"))") = world ]] (! nix eval --raw "(builtins.readFile (fetchGit file://$repo + \"/hello\"))") # Fetch using an explicit revision hash. @@ -42,14 +42,14 @@ path2=$(nix eval --raw "(builtins.fetchGit { url = file://$repo; rev = \"$rev2\" # Fetch again. This should be cached. mv $repo ${repo}-tmp -path2=$(nix eval --no-pure-eval --raw "(builtins.fetchGit file://$repo).outPath") +path2=$(nix eval --impure --raw "(builtins.fetchGit file://$repo).outPath") [[ $path = $path2 ]] -[[ $(nix eval --no-pure-eval "(builtins.fetchGit file://$repo).revCount") = 2 ]] -[[ $(nix eval --no-pure-eval --raw "(builtins.fetchGit file://$repo).rev") = $rev2 ]] +[[ $(nix eval --impure "(builtins.fetchGit file://$repo).revCount") = 2 ]] +[[ $(nix eval --impure --raw "(builtins.fetchGit file://$repo).rev") = $rev2 ]] # But with TTL 0, it should fail. -(! nix eval --no-pure-eval --tarball-ttl 0 "(builtins.fetchGit file://$repo)" -vvvvv) +(! nix eval --impure --tarball-ttl 0 "(builtins.fetchGit file://$repo)" -vvvvv) # Fetching with a explicit hash should succeed. 
path2=$(nix eval --tarball-ttl 0 --raw "(builtins.fetchGit { url = file://$repo; rev = \"$rev2\"; }).outPath") @@ -61,7 +61,7 @@ path2=$(nix eval --tarball-ttl 0 --raw "(builtins.fetchGit { url = file://$repo; mv ${repo}-tmp $repo # Using a clean working tree should produce the same result. -path2=$(nix eval --no-pure-eval --raw "(builtins.fetchGit $repo).outPath") +path2=$(nix eval --impure --raw "(builtins.fetchGit $repo).outPath") [[ $path = $path2 ]] # Using an unclean tree should yield the tracked but uncommitted changes. @@ -72,17 +72,17 @@ echo bar > $repo/dir2/bar git -C $repo add dir1/foo git -C $repo rm hello -path2=$(nix eval --no-pure-eval --raw "(builtins.fetchGit $repo).outPath") +path2=$(nix eval --impure --raw "(builtins.fetchGit $repo).outPath") [ ! -e $path2/hello ] [ ! -e $path2/bar ] [ ! -e $path2/dir2/bar ] [ ! -e $path2/.git ] [[ $(cat $path2/dir1/foo) = foo ]] -[[ $(nix eval --no-pure-eval --raw "(builtins.fetchGit $repo).rev") = 0000000000000000000000000000000000000000 ]] +[[ $(nix eval --impure --raw "(builtins.fetchGit $repo).rev") = 0000000000000000000000000000000000000000 ]] # ... unless we're using an explicit ref or rev. -path3=$(nix eval --no-pure-eval --raw "(builtins.fetchGit { url = $repo; ref = \"master\"; }).outPath") +path3=$(nix eval --impure --raw "(builtins.fetchGit { url = $repo; ref = \"master\"; }).outPath") [[ $path = $path3 ]] path3=$(nix eval --raw "(builtins.fetchGit { url = $repo; rev = \"$rev2\"; }).outPath") @@ -91,7 +91,7 @@ path3=$(nix eval --raw "(builtins.fetchGit { url = $repo; rev = \"$rev2\"; }).ou # Committing should not affect the store path. git -C $repo commit -m 'Bla3' -a -path4=$(nix eval --no-pure-eval --tarball-ttl 0 --raw "(builtins.fetchGit file://$repo).outPath") +path4=$(nix eval --impure --tarball-ttl 0 --raw "(builtins.fetchGit file://$repo).outPath") [[ $path2 = $path4 ]] # tarball-ttl should be ignored if we specify a rev @@ -102,29 +102,29 @@ rev3=$(git -C $repo rev-parse HEAD) nix eval --tarball-ttl 3600 "(builtins.fetchGit { url = $repo; rev = \"$rev3\"; })" >/dev/null # Update 'path' to reflect latest master -path=$(nix eval --no-pure-eval --raw "(builtins.fetchGit file://$repo).outPath") +path=$(nix eval --impure --raw "(builtins.fetchGit file://$repo).outPath") # Check behavior when non-master branch is used git -C $repo checkout $rev2 -b dev echo dev > $repo/hello # File URI uses 'master' unless specified otherwise -path2=$(nix eval --no-pure-eval --raw "(builtins.fetchGit file://$repo).outPath") +path2=$(nix eval --impure --raw "(builtins.fetchGit file://$repo).outPath") [[ $path = $path2 ]] # Using local path with branch other than 'master' should work when clean or dirty -path3=$(nix eval --no-pure-eval --raw "(builtins.fetchGit $repo).outPath") +path3=$(nix eval --impure --raw "(builtins.fetchGit $repo).outPath") # (check dirty-tree handling was used) -[[ $(nix eval --no-pure-eval --raw "(builtins.fetchGit $repo).rev") = 0000000000000000000000000000000000000000 ]] +[[ $(nix eval --impure --raw "(builtins.fetchGit $repo).rev") = 0000000000000000000000000000000000000000 ]] # Committing shouldn't change store path, or switch to using 'master' git -C $repo commit -m 'Bla5' -a -path4=$(nix eval --no-pure-eval --raw "(builtins.fetchGit $repo).outPath") +path4=$(nix eval --impure --raw "(builtins.fetchGit $repo).outPath") [[ $(cat $path4/hello) = dev ]] [[ $path3 = $path4 ]] # Confirm same as 'dev' branch -path5=$(nix eval --no-pure-eval --raw "(builtins.fetchGit { url = $repo; ref = \"dev\"; }).outPath") 
+path5=$(nix eval --impure --raw "(builtins.fetchGit { url = $repo; ref = \"dev\"; }).outPath") [[ $path3 = $path5 ]] @@ -134,8 +134,8 @@ rm -rf $TEST_HOME/.cache/nix/gitv2 # Try again, but without 'git' on PATH NIX=$(command -v nix) # This should fail -(! PATH= $NIX eval --no-pure-eval --raw "(builtins.fetchGit { url = $repo; ref = \"dev\"; }).outPath" ) +(! PATH= $NIX eval --impure --raw "(builtins.fetchGit { url = $repo; ref = \"dev\"; }).outPath" ) # Try again, with 'git' available. This should work. -path5=$(nix eval --no-pure-eval --raw "(builtins.fetchGit { url = $repo; ref = \"dev\"; }).outPath") +path5=$(nix eval --impure --raw "(builtins.fetchGit { url = $repo; ref = \"dev\"; }).outPath") [[ $path3 = $path5 ]] diff --git a/tests/fetchMercurial.sh b/tests/fetchMercurial.sh index d0735a381..a0f792612 100644 --- a/tests/fetchMercurial.sh +++ b/tests/fetchMercurial.sh @@ -26,15 +26,15 @@ hg commit --cwd $repo -m 'Bla2' rev2=$(hg log --cwd $repo -r tip --template '{node}') # Fetch the default branch. -path=$(nix eval --no-pure-eval --raw "(builtins.fetchMercurial file://$repo).outPath") +path=$(nix eval --impure --raw "(builtins.fetchMercurial file://$repo).outPath") [[ $(cat $path/hello) = world ]] # In pure eval mode, fetchGit without a revision should fail. -[[ $(nix eval --no-pure-eval --raw "(builtins.readFile (fetchMercurial file://$repo + \"/hello\"))") = world ]] +[[ $(nix eval --impure --raw "(builtins.readFile (fetchMercurial file://$repo + \"/hello\"))") = world ]] (! nix eval --raw "(builtins.readFile (fetchMercurial file://$repo + \"/hello\"))") # Fetch using an explicit revision hash. -path2=$(nix eval --no-pure-eval --raw "(builtins.fetchMercurial { url = file://$repo; rev = \"$rev2\"; }).outPath") +path2=$(nix eval --impure --raw "(builtins.fetchMercurial { url = file://$repo; rev = \"$rev2\"; }).outPath") [[ $path = $path2 ]] # In pure eval mode, fetchGit with a revision should succeed. @@ -42,15 +42,15 @@ path2=$(nix eval --no-pure-eval --raw "(builtins.fetchMercurial { url = file://$ # Fetch again. This should be cached. mv $repo ${repo}-tmp -path2=$(nix eval --no-pure-eval --raw "(builtins.fetchMercurial file://$repo).outPath") +path2=$(nix eval --impure --raw "(builtins.fetchMercurial file://$repo).outPath") [[ $path = $path2 ]] -[[ $(nix eval --no-pure-eval --raw "(builtins.fetchMercurial file://$repo).branch") = default ]] -[[ $(nix eval --no-pure-eval "(builtins.fetchMercurial file://$repo).revCount") = 1 ]] -[[ $(nix eval --no-pure-eval --raw "(builtins.fetchMercurial file://$repo).rev") = $rev2 ]] +[[ $(nix eval --impure --raw "(builtins.fetchMercurial file://$repo).branch") = default ]] +[[ $(nix eval --impure "(builtins.fetchMercurial file://$repo).revCount") = 1 ]] +[[ $(nix eval --impure --raw "(builtins.fetchMercurial file://$repo).rev") = $rev2 ]] # But with TTL 0, it should fail. -(! nix eval --no-pure-eval --tarball-ttl 0 "(builtins.fetchMercurial file://$repo)") +(! nix eval --impure --tarball-ttl 0 "(builtins.fetchMercurial file://$repo)") # Fetching with a explicit hash should succeed. path2=$(nix eval --tarball-ttl 0 --raw "(builtins.fetchMercurial { url = file://$repo; rev = \"$rev2\"; }).outPath") @@ -62,7 +62,7 @@ path2=$(nix eval --tarball-ttl 0 --raw "(builtins.fetchMercurial { url = file:// mv ${repo}-tmp $repo # Using a clean working tree should produce the same result. 
-path2=$(nix eval --no-pure-eval --raw "(builtins.fetchMercurial $repo).outPath") +path2=$(nix eval --impure --raw "(builtins.fetchMercurial $repo).outPath") [[ $path = $path2 ]] # Using an unclean tree should yield the tracked but uncommitted changes. @@ -73,14 +73,14 @@ echo bar > $repo/dir2/bar hg add --cwd $repo dir1/foo hg rm --cwd $repo hello -path2=$(nix eval --no-pure-eval --raw "(builtins.fetchMercurial $repo).outPath") +path2=$(nix eval --impure --raw "(builtins.fetchMercurial $repo).outPath") [ ! -e $path2/hello ] [ ! -e $path2/bar ] [ ! -e $path2/dir2/bar ] [ ! -e $path2/.hg ] [[ $(cat $path2/dir1/foo) = foo ]] -[[ $(nix eval --no-pure-eval --raw "(builtins.fetchMercurial $repo).rev") = 0000000000000000000000000000000000000000 ]] +[[ $(nix eval --impure --raw "(builtins.fetchMercurial $repo).rev") = 0000000000000000000000000000000000000000 ]] # ... unless we're using an explicit rev. path3=$(nix eval --raw "(builtins.fetchMercurial { url = $repo; rev = \"default\"; }).outPath") @@ -89,5 +89,5 @@ path3=$(nix eval --raw "(builtins.fetchMercurial { url = $repo; rev = \"default\ # Committing should not affect the store path. hg commit --cwd $repo -m 'Bla3' -path4=$(nix eval --no-pure-eval --tarball-ttl 0 --raw "(builtins.fetchMercurial file://$repo).outPath") +path4=$(nix eval --impure --tarball-ttl 0 --raw "(builtins.fetchMercurial file://$repo).outPath") [[ $path2 = $path4 ]] diff --git a/tests/pure-eval.sh b/tests/pure-eval.sh index 307942940..6e2c6962d 100644 --- a/tests/pure-eval.sh +++ b/tests/pure-eval.sh @@ -4,7 +4,7 @@ clearStore nix eval '(assert 1 + 2 == 3; true)' -[[ $(nix eval --no-pure-eval '(builtins.readFile ./pure-eval.sh)') =~ clearStore ]] +[[ $(nix eval --impure '(builtins.readFile ./pure-eval.sh)') =~ clearStore ]] (! nix eval '(builtins.readFile ./pure-eval.sh)') @@ -13,6 +13,6 @@ nix eval '(assert 1 + 2 == 3; true)' (! nix-instantiate --pure-eval ./simple.nix) -[[ $(nix eval --no-pure-eval "((import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; })).x)") == 123 ]] +[[ $(nix eval --impure "((import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; })).x)") == 123 ]] (! nix eval "((import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; })).x)") nix eval "((import (builtins.fetchurl { url = file://$(pwd)/pure-eval.nix; sha256 = \"$(nix hash-file pure-eval.nix --type sha256)\"; })).x)" diff --git a/tests/restricted.sh b/tests/restricted.sh index 68913cd36..e660de127 100644 --- a/tests/restricted.sh +++ b/tests/restricted.sh @@ -17,18 +17,18 @@ nix-instantiate --restrict-eval --eval -E 'builtins.readDir ../src/nix-channel' (! nix-instantiate --restrict-eval --eval -E 'let __nixPath = [ { prefix = "foo"; path = ./.; } ]; in ') nix-instantiate --restrict-eval --eval -E 'let __nixPath = [ { prefix = "foo"; path = ./.; } ]; in ' -I src=. -p=$(nix eval --raw "(builtins.fetchurl file://$(pwd)/restricted.sh)" --no-pure-eval --restrict-eval --allowed-uris "file://$(pwd)") +p=$(nix eval --raw "(builtins.fetchurl file://$(pwd)/restricted.sh)" --impure --restrict-eval --allowed-uris "file://$(pwd)") cmp $p restricted.sh -(! nix eval --raw "(builtins.fetchurl file://$(pwd)/restricted.sh)" --no-pure-eval --restrict-eval) +(! nix eval --raw "(builtins.fetchurl file://$(pwd)/restricted.sh)" --impure --restrict-eval) -(! nix eval --raw "(builtins.fetchurl file://$(pwd)/restricted.sh)" --no-pure-eval --restrict-eval --allowed-uris "file://$(pwd)/restricted.sh/") +(! 
nix eval --raw "(builtins.fetchurl file://$(pwd)/restricted.sh)" --impure --restrict-eval --allowed-uris "file://$(pwd)/restricted.sh/") -nix eval --raw "(builtins.fetchurl file://$(pwd)/restricted.sh)" --no-pure-eval --restrict-eval --allowed-uris "file://$(pwd)/restricted.sh" +nix eval --raw "(builtins.fetchurl file://$(pwd)/restricted.sh)" --impure --restrict-eval --allowed-uris "file://$(pwd)/restricted.sh" -(! nix eval --raw "(builtins.fetchurl https://github.com/NixOS/patchelf/archive/master.tar.gz)" --no-pure-eval --restrict-eval) -(! nix eval --raw "(builtins.fetchTarball https://github.com/NixOS/patchelf/archive/master.tar.gz)" --no-pure-eval --restrict-eval) -(! nix eval --raw "(fetchGit git://github.com/NixOS/patchelf.git)" --no-pure-eval --restrict-eval) +(! nix eval --raw "(builtins.fetchurl https://github.com/NixOS/patchelf/archive/master.tar.gz)" --impure --restrict-eval) +(! nix eval --raw "(builtins.fetchTarball https://github.com/NixOS/patchelf/archive/master.tar.gz)" --impure --restrict-eval) +(! nix eval --raw "(fetchGit git://github.com/NixOS/patchelf.git)" --impure --restrict-eval) ln -sfn $(pwd)/restricted.nix $TEST_ROOT/restricted.nix [[ $(nix-instantiate --eval $TEST_ROOT/restricted.nix) == 3 ]] @@ -37,7 +37,7 @@ ln -sfn $(pwd)/restricted.nix $TEST_ROOT/restricted.nix (! nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix -I .) nix-instantiate --eval --restrict-eval $TEST_ROOT/restricted.nix -I $TEST_ROOT -I . -[[ $(nix eval --raw --no-pure-eval --restrict-eval -I . '(builtins.readFile "${import ./simple.nix}/hello")') == 'Hello World!' ]] +[[ $(nix eval --raw --impure --restrict-eval -I . '(builtins.readFile "${import ./simple.nix}/hello")') == 'Hello World!' ]] # Check whether we can leak symlink information through directory traversal. 
traverseDir="$(pwd)/restricted-traverse-me" From 47727252ff4e536dd47b73949033d3349923fbbb Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 8 Apr 2019 23:36:12 +0200 Subject: [PATCH 037/634] Add "nix flake init" command for creating a flake --- src/nix/flake.cc | 49 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 48 insertions(+), 1 deletion(-) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 470dfdc08..01385ff8d 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -182,6 +182,51 @@ struct CmdFlakePin : virtual Args, StoreCommand, MixEvalArgs } }; +struct CmdFlakeInit : virtual Args, Command +{ + std::string name() override + { + return "init"; + } + + std::string description() override + { + return "create a skeleton 'flake.nix' file in the current directory"; + } + + void run() override + { + Path flakeDir = absPath("."); + + if (!pathExists(flakeDir + "/.git")) + throw Error("the directory '%s' is not a Git repository", flakeDir); + + Path flakePath = flakeDir + "/flake.nix"; + + if (pathExists(flakePath)) + throw Error("file '%s' already exists", flakePath); + + writeFile(flakePath, +R"str( +{ + name = "hello"; + + description = "A flake for building Hello World"; + + epoch = 2019; + + requires = [ "nixpkgs" ]; + + provides = deps: rec { + + packages.hello = deps.nixpkgs.provides.packages.hello; + + }; +} +)str"); + } +}; + struct CmdFlake : virtual MultiCommand, virtual Command { CmdFlake() @@ -190,7 +235,9 @@ struct CmdFlake : virtual MultiCommand, virtual Command , make_ref() , make_ref() , make_ref() - , make_ref()}) + , make_ref() + , make_ref() + }) { } From 507da65900ccb3c6356673e93ad2271c58e43b07 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 8 Apr 2019 23:39:38 +0200 Subject: [PATCH 038/634] Move flake template into a separate file --- .gitignore | 2 +- src/nix/flake-template.nix | 15 +++++++++++++++ src/nix/flake.cc | 19 ++----------------- src/nix/local.mk | 2 ++ 4 files changed, 20 insertions(+), 18 deletions(-) create mode 100644 src/nix/flake-template.nix diff --git a/.gitignore b/.gitignore index b75c5d489..da0e7c843 100644 --- a/.gitignore +++ b/.gitignore @@ -46,7 +46,7 @@ perl/Makefile.config /src/libexpr/nix.tbl # /src/libstore/ -/src/libstore/*.gen.hh +*.gen.* /src/nix/nix diff --git a/src/nix/flake-template.nix b/src/nix/flake-template.nix new file mode 100644 index 000000000..fe89e647e --- /dev/null +++ b/src/nix/flake-template.nix @@ -0,0 +1,15 @@ +{ + name = "hello"; + + description = "A flake for building Hello World"; + + epoch = 2019; + + requires = [ "nixpkgs" ]; + + provides = deps: rec { + + packages.hello = deps.nixpkgs.provides.packages.hello; + + }; +} diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 01385ff8d..3d2fb7832 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -207,23 +207,8 @@ struct CmdFlakeInit : virtual Args, Command throw Error("file '%s' already exists", flakePath); writeFile(flakePath, -R"str( -{ - name = "hello"; - - description = "A flake for building Hello World"; - - epoch = 2019; - - requires = [ "nixpkgs" ]; - - provides = deps: rec { - - packages.hello = deps.nixpkgs.provides.packages.hello; - - }; -} -)str"); +#include "flake-template.nix.gen.hh" + ); } }; diff --git a/src/nix/local.mk b/src/nix/local.mk index ca4604d56..4003d0005 100644 --- a/src/nix/local.mk +++ b/src/nix/local.mk @@ -23,3 +23,5 @@ $(foreach name, \ nix-build nix-channel nix-collect-garbage nix-copy-closure nix-daemon nix-env nix-hash nix-instantiate nix-prefetch-url nix-shell nix-store, \ $(eval $(call 
install-symlink, nix, $(bindir)/$(name)))) $(eval $(call install-symlink, $(bindir)/nix, $(libexecdir)/nix/build-remote)) + +$(d)/flake.cc: $(d)/flake-template.nix.gen.hh From c996e04aca2db1755ded4864465338afab677ff5 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 8 Apr 2019 23:47:29 +0200 Subject: [PATCH 039/634] Allow relative paths in flakerefs Also allow "." as an installable to refer to the flake in the current directory. E.g. $ nix build . will build 'provides.defaultPackage' in the flake in the current directory. --- flake.nix | 1 + src/libexpr/primops/flakeref.cc | 6 +++--- src/libexpr/primops/flakeref.hh | 2 +- src/nix/installables.cc | 14 ++++++++++---- 4 files changed, 15 insertions(+), 8 deletions(-) diff --git a/flake.nix b/flake.nix index b119f0324..695f67fa4 100644 --- a/flake.nix +++ b/flake.nix @@ -14,5 +14,6 @@ packages.nix = hydraJobs.build.x86_64-linux; + defaultPackage = packages.nix; }; } diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index 5f9a29260..1df53bfb8 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -32,7 +32,7 @@ const static std::string segmentRegex = "[a-zA-Z0-9._~-]+"; const static std::string pathRegex = "/?" + segmentRegex + "(?:/" + segmentRegex + ")*"; const static std::string paramRegex = "[a-z]+=[a-zA-Z0-9._-]*"; -FlakeRef::FlakeRef(const std::string & uri) +FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) { // FIXME: could combine this into one regex. @@ -106,9 +106,9 @@ FlakeRef::FlakeRef(const std::string & uri) data = d; } - else if (hasPrefix(uri, "/")) { + else if (hasPrefix(uri, "/") || (allowRelative && (hasPrefix(uri, "./") || uri == "."))) { IsPath d; - d.path = canonPath(uri); + d.path = allowRelative ? absPath(uri) : canonPath(uri); data = d; } diff --git a/src/libexpr/primops/flakeref.hh b/src/libexpr/primops/flakeref.hh index 832d7dd03..fa14f7c25 100644 --- a/src/libexpr/primops/flakeref.hh +++ b/src/libexpr/primops/flakeref.hh @@ -132,7 +132,7 @@ struct FlakeRef std::variant data; // Parse a flake URI. 
- FlakeRef(const std::string & uri); + FlakeRef(const std::string & uri, bool allowRelative = false); // Default constructor FlakeRef(const FlakeRef & flakeRef) : data(flakeRef.data) {}; diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 631a849cd..f3be7b628 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -219,12 +219,18 @@ std::vector> SourceExprCommand::parseInstallables( else if ((colon = s.rfind(':')) != std::string::npos) { auto flakeRef = std::string(s, 0, colon); auto attrPath = std::string(s, colon + 1); - result.push_back(std::make_shared(*this, FlakeRef(flakeRef), attrPath)); + result.push_back(std::make_shared(*this, FlakeRef(flakeRef, true), attrPath)); } - else if (s.find('/') != std::string::npos) { - auto path = store->toStorePath(store->followLinksToStore(s)); - result.push_back(std::make_shared(path)); + else if (s.find('/') != std::string::npos || s == ".") { + Path storePath; + try { + storePath = store->toStorePath(store->followLinksToStore(s)); + } catch (Error) { } + if (storePath != "") + result.push_back(std::make_shared(storePath)); + else + result.push_back(std::make_shared(*this, FlakeRef(s, true), "defaultPackage")); } else From 87033f2c4e32f4851e8c2abf8ab3b56444b65590 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 8 Apr 2019 23:58:33 +0200 Subject: [PATCH 040/634] Whitespace --- src/nix/installables.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nix/installables.cc b/src/nix/installables.cc index f3be7b628..e792ce96d 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -300,7 +300,7 @@ Path toStorePath(ref store, RealiseMode mode, auto paths = toStorePaths(store, mode, {installable}); if (paths.size() != 1) - throw Error("argument '%s' should evaluate to one store path", installable->what()); + throw Error("argument '%s' should evaluate to one store path", installable->what()); return *paths.begin(); } From 18c019b616f457b1f9a39da8cafc012be5ddffcc Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Thu, 21 Mar 2019 09:30:16 +0100 Subject: [PATCH 041/634] Added nonFlakeRequires and the command `nix flake deps` --- src/libexpr/primops/flake.cc | 191 +++++++++++++++++++++-------------- src/libexpr/primops/flake.hh | 23 ++++- src/nix/build.cc | 14 +-- src/nix/flake.cc | 65 ++++++++++-- 4 files changed, 200 insertions(+), 93 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 7cfb2038c..c4ae29022 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -18,20 +18,19 @@ std::shared_ptr readRegistry(const Path & path) { auto registry = std::make_shared(); - try { - auto json = nlohmann::json::parse(readFile(path)); + if (!pathExists(path)) + return std::make_shared(); - auto version = json.value("version", 0); - if (version != 1) - throw Error("flake registry '%s' has unsupported version %d", path, version); + auto json = nlohmann::json::parse(readFile(path)); - auto flakes = json["flakes"]; - for (auto i = flakes.begin(); i != flakes.end(); ++i) { - FlakeRegistry::Entry entry{FlakeRef(i->value("uri", ""))}; - registry->entries.emplace(i.key(), entry); - } - } catch (SysError & e) { - if (e.errNo != ENOENT) throw; + auto version = json.value("version", 0); + if (version != 1) + throw Error("flake registry '%s' has unsupported version %d", path, version); + + auto flakes = json["flakes"]; + for (auto i = flakes.begin(); i != flakes.end(); ++i) { + FlakeRegistry::Entry entry{FlakeRef(i->value("uri", ""))}; + 
registry->entries.emplace(i.key(), entry); } return registry; @@ -54,7 +53,6 @@ Path getUserRegistryPath() { return getHome() + "/.config/nix/registry.json"; } - std::shared_ptr getGlobalRegistry() { // FIXME: get from nixos.org. @@ -76,12 +74,20 @@ std::shared_ptr getFlagRegistry() const std::vector> EvalState::getFlakeRegistries() { std::vector> registries; - registries.push_back(getGlobalRegistry()); - registries.push_back(getUserRegistry()); + if (evalSettings.pureEval) { + registries.push_back(std::make_shared()); // global + registries.push_back(std::make_shared()); // user + registries.push_back(std::make_shared()); // local + } else { + registries.push_back(getGlobalRegistry()); + registries.push_back(getUserRegistry()); + registries.push_back(getLocalRegistry()); + } registries.push_back(getFlagRegistry()); return registries; } +// Creates a Nix attribute set value listing all dependencies, so they can be used in `provides`. Value * makeFlakeRegistryValue(EvalState & state) { auto v = state.allocValue(); @@ -199,7 +205,7 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef) Flake getFlake(EvalState & state, const FlakeRef & flakeRef) { - auto sourceInfo = fetchFlake(state, flakeRef); + FlakeSourceInfo sourceInfo = fetchFlake(state, flakeRef); debug("got flake source '%s' with revision %s", sourceInfo.storePath, sourceInfo.rev.value_or(Hash(htSHA1)).to_string(Base16, false)); @@ -209,18 +215,16 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) if (state.allowedPaths) state.allowedPaths->insert(flakePath); - FlakeRef newFlakeRef(flakeRef); - if (std::get_if(&newFlakeRef.data)) { - FlakeSourceInfo srcInfo = fetchFlake(state, newFlakeRef); - if (srcInfo.rev) { - std::string uri = flakeRef.baseRef().to_string(); - newFlakeRef = FlakeRef(uri + "/" + srcInfo.rev->to_string(Base16, false)); - } + Flake flake(flakeRef); + if (std::get_if(&flakeRef.data)) { + if (sourceInfo.rev) + flake.ref = FlakeRef(flakeRef.baseRef().to_string() + + "/" + sourceInfo.rev->to_string(Base16, false)); } - Flake flake(newFlakeRef); flake.path = flakePath; flake.revCount = sourceInfo.revCount; + flake.path = flakePath; Value vInfo; state.evalFile(flakePath + "/flake.nix", vInfo); // FIXME: symlink attack @@ -242,6 +246,15 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) *(**requires).value->listElems()[n], *(**requires).pos))); } + if (std::optional nonFlakeRequires = vInfo.attrs->get(state.symbols.create("nonFlakeRequires"))) { + state.forceAttrs(*(**nonFlakeRequires).value, *(**nonFlakeRequires).pos); + for (Attr attr : *(*(**nonFlakeRequires).value).attrs) { + std::string myNonFlakeUri = state.forceStringNoCtx(*attr.value, *attr.pos); + FlakeRef nonFlakeRef = FlakeRef(myNonFlakeUri); + flake.nonFlakeRequires.insert_or_assign(attr.name, nonFlakeRef); + } + } + if (auto provides = vInfo.attrs->get(state.symbols.create("provides"))) { state.forceFunction(*(**provides).value, *(**provides).pos); flake.vProvides = (**provides).value; @@ -250,86 +263,107 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) auto lockFile = flakePath + "/flake.lock"; // FIXME: symlink attack - if (pathExists(lockFile)) { - flake.lockFile = readRegistry(lockFile); - for (auto & entry : flake.lockFile->entries) - if (!entry.second.ref.isImmutable()) - throw Error("flake lock file '%s' contains mutable entry '%s'", - lockFile, entry.second.ref.to_string()); - } + flake.lockFile = readRegistry(lockFile); + for (auto & entry : flake.lockFile->entries) + if 
(!entry.second.ref.isImmutable()) + throw Error("flake lock file '%s' contains mutable entry '%s'", + lockFile, entry.second.ref.to_string()); + return flake; } +// Get the `NonFlake` corresponding to a `FlakeRef`. +NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeId flakeId) +{ + FlakeSourceInfo sourceInfo = fetchFlake(state, flakeRef); + debug("got non-flake source '%s' with revision %s", + sourceInfo.storePath, sourceInfo.rev.value_or(Hash(htSHA1)).to_string(Base16, false)); + + auto flakePath = sourceInfo.storePath; + state.store->assertStorePath(flakePath); + + if (state.allowedPaths) + state.allowedPaths->insert(flakePath); + + NonFlake nonFlake(flakeRef); + if (std::get_if(&flakeRef.data)) { + if (sourceInfo.rev) + nonFlake.ref = FlakeRef(flakeRef.baseRef().to_string() + + "/" + sourceInfo.rev->to_string(Base16, false)); + } + + nonFlake.path = flakePath; + + nonFlake.id = flakeId; + + return nonFlake; +} + /* Given a flake reference, recursively fetch it and its dependencies. FIXME: this should return a graph of flakes. */ -static std::tuple> resolveFlake(EvalState & state, - const FlakeRef & topRef, bool impureTopRef) +Dependencies resolveFlake(EvalState & state, const FlakeRef & topRef, bool impureTopRef) { - std::map done; - std::queue> todo; - std::optional topFlakeId; /// FIXME: ambiguous - todo.push({topRef, true}); + Dependencies deps; + std::queue todo; + bool isTopLevel = true; + todo.push(topRef); auto registries = state.getFlakeRegistries(); - //std::shared_ptr localRegistry = registries.at(2); while (!todo.empty()) { - auto [flakeRef, toplevel] = todo.front(); + auto flakeRef = todo.front(); todo.pop(); if (auto refData = std::get_if(&flakeRef.data)) { if (done.count(refData->id)) continue; // optimization - flakeRef = lookupFlake(state, flakeRef, - !evalSettings.pureEval || (toplevel && impureTopRef) ? registries : std::vector>()); - // This is why we need the `registries`. - } - -#if 0 - if (evalSettings.pureEval && !flakeRef.isImmutable() && (!toplevel || !impureTopRef)) + flakeRef = lookupFlake(state, flakeRef, registries); + if (evalSettings.pureEval && !flakeRef.isImmutable() && (!isTopLevel || !impureTopRef)) throw Error("mutable flake '%s' is not allowed in pure mode; use --impure to disable", flakeRef.to_string()); -#endif auto flake = getFlake(state, flakeRef); - if (done.count(flake.id)) continue; + if (isTopLevel) { + deps.topFlakeId = flake.id; + isTopLevel = false; + } - if (toplevel) topFlakeId = flake.id; + for (auto & flakeRef : flake.requires) + todo.push(flakeRef); - for (auto & require : flake.requires) - todo.push({require, false}); + for (auto & x : flake.nonFlakeRequires) + deps.nonFlakes.push_back(getNonFlake(state, x.second, x.first)); + // TODO (Nick): If there are 2 non-flake dependencies with the same + // FlakeId, this will lead to trouble! One of the dependencies won't + // be used! -#if 0 - // The following piece of code basically adds the FlakeRefs from - // the lockfiles of dependencies to the localRegistry. This is used - // to resolve future `FlakeId`s, in `lookupFlake` a bit above this. 
- if (flake.lockFile) - for (auto & entry : flake.lockFile->entries) { - if (localRegistry->entries.count(entry.first)) continue; - localRegistry->entries.emplace(entry.first, entry.second); - } -#endif - - done.emplace(flake.id, std::move(flake)); + deps.flakes.push_back(flake); } - assert(topFlakeId); - return {*topFlakeId, std::move(done)}; + return deps; } FlakeRegistry updateLockFile(EvalState & evalState, FlakeRef & flakeRef) { FlakeRegistry newLockFile; - std::map myDependencyMap = get<1>(resolveFlake(evalState, flakeRef, false)); + Dependencies deps = resolveFlake(evalState, flakeRef, false); // Nick assumed that "topRefPure" means that the Flake for flakeRef can be // fetched purely. - for (auto const& require : myDependencyMap) { - FlakeRegistry::Entry entry = FlakeRegistry::Entry(require.second.ref); - // The FlakeRefs are immutable because they come out of the Flake objects, - // not from the requires. - newLockFile.entries.insert(std::pair(require.first, entry)); + for (auto const& require : deps.flakes) { + FlakeRegistry::Entry entry = FlakeRegistry::Entry(require.ref); + // The FlakeRefs are immutable because they come out of the Flake objects. + if (require.id != deps.topFlakeId) + newLockFile.entries.insert_or_assign(require.id, entry); + // TODO (Nick): If there are 2 flake dependencies with the same FlakeId, + // one of them gets ignored! + } + for (auto const& nonFlake : deps.nonFlakes) { + FlakeRegistry::Entry entry = FlakeRegistry::Entry(nonFlake.ref); + newLockFile.entries.insert_or_assign(nonFlake.id, entry); + // We are assuming the sets of FlakeIds for flakes and non-flakes + // are disjoint. } return newLockFile; } @@ -348,21 +382,26 @@ void updateLockFile(EvalState & state, std::string path) } } -Value * makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, bool impureTopRef, Value & v) +// Return the `provides` of the top flake, while assigning to `v` the provides +// of the dependencies as well. +Value * makeFlakeValue(EvalState & state, FlakeUri flakeUri, bool impureTopRef, Value & v) { - auto [topFlakeId, flakes] = resolveFlake(state, flakeRef, impureTopRef); + FlakeRef flakeRef = FlakeRef(flakeUri); + + Dependencies deps = resolveFlake(state, flakeRef, impure); // FIXME: we should call each flake with only its dependencies // (rather than the closure of the top-level flake). auto vResult = state.allocValue(); + // This will store the attribute set of the `nonFlakeRequires` and the `requires.provides`. 
- state.mkAttrs(*vResult, flakes.size()); + state.mkAttrs(*vResult, dependencies.flakes.size()); Value * vTop = 0; - for (auto & flake : flakes) { - auto vFlake = state.allocAttr(*vResult, flake.second.id); + for (auto & flake : deps.flakes) { + auto vFlake = state.allocAttr(*vResult, flake.id); if (topFlakeId == flake.second.id) vTop = vFlake; state.mkAttrs(*vFlake, 4); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index aea4e8aa2..ffd962561 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -38,15 +38,34 @@ struct Flake std::optional revCount; std::vector requires; std::shared_ptr lockFile; + std::map nonFlakeRequires; Value * vProvides; // FIXME: gc - // commit hash // date // content hash - Flake(FlakeRef & flakeRef) : ref(flakeRef) {}; + Flake(const FlakeRef flakeRef) : ref(flakeRef) {}; +}; + +struct NonFlake +{ + FlakeId id; + FlakeRef ref; + Path path; + // date + // content hash + NonFlake(const FlakeRef flakeRef) : ref(flakeRef) {}; }; Flake getFlake(EvalState &, const FlakeRef &); +struct Dependencies +{ + FlakeId topFlakeId; + std::vector flakes; + std::vector nonFlakes; +}; + +Dependencies resolveFlake(EvalState &, const FlakeRef &, bool impureTopRef); + FlakeRegistry updateLockFile(EvalState &, Flake &); void updateLockFile(EvalState &, std::string); diff --git a/src/nix/build.cc b/src/nix/build.cc index da7c7f614..a6fcf5094 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -11,7 +11,7 @@ struct CmdBuild : MixDryRun, InstallablesCommand { Path outLink = "result"; - std::optional gitRepo = std::nullopt; + bool updateLock = true; CmdBuild() { @@ -28,9 +28,9 @@ struct CmdBuild : MixDryRun, InstallablesCommand .set(&outLink, Path("")); mkFlag() - .longName("update-lock-file") - .description("update the lock file") - .dest(&gitRepo); + .longName("no-update") + .description("don't update the lock file") + .set(&updateLock, false); } std::string name() override @@ -78,8 +78,10 @@ struct CmdBuild : MixDryRun, InstallablesCommand } } - if (gitRepo) - updateLockFile(*evalState, *gitRepo); + if(updateLock) + for (int i = 0; i < installables.size(); i++) + if (auto flakeUri = installableToFlakeUri) + updateLockFile(*evalState, FlakeRef(*flakeUri)); } }; diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 3d2fb7832..07d31c45a 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -36,6 +36,60 @@ struct CmdFlakeList : StoreCommand, MixEvalArgs } }; +void printFlakeInfo(Flake & flake, bool json) { + if (json) { + nlohmann::json j; + j["name"] = flake.id; + j["location"] = flake.path; + j["description"] = flake.description; + std::cout << j.dump(4) << std::endl; + } else { + std::cout << "Name: " << flake.id << "\n"; + std::cout << "Description: " << flake.description << "\n"; + std::cout << "Location: " << flake.path << "\n"; + } +} + +void printNonFlakeInfo(NonFlake & nonFlake, bool json) { + if (json) { + nlohmann::json j; + j["name"] = nonFlake.id; + j["location"] = nonFlake.path; + std::cout << j.dump(4) << std::endl; + } else { + std::cout << "name: " << nonFlake.id << "\n"; + std::cout << "Location: " << nonFlake.path << "\n"; + } +} + +struct CmdFlakeDeps : FlakeCommand, MixJSON, StoreCommand, MixEvalArgs +{ + std::string name() override + { + return "deps"; + } + + std::string description() override + { + return "list informaton about dependencies"; + } + + void run(nix::ref store) override + { + auto evalState = std::make_shared(searchPath, store); + + FlakeRef flakeRef(flakeUri); + + Dependencies deps = 
resolveFlake(*evalState, flakeRef, true); + + for (auto & flake : deps.flakes) + printFlakeInfo(flake, json); + + for (auto & nonFlake : deps.nonFlakes) + printNonFlakeInfo(nonFlake, json); + } +}; + struct CmdFlakeUpdate : StoreCommand, GitRepoCommand, MixEvalArgs { std::string name() override @@ -73,15 +127,7 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON, MixEvalArgs, StoreCommand { auto evalState = std::make_shared(searchPath, store); nix::Flake flake = nix::getFlake(*evalState, FlakeRef(flakeUri)); - if (json) { - nlohmann::json j; - j["location"] = flake.path; - j["description"] = flake.description; - std::cout << j.dump(4) << std::endl; - } else { - std::cout << "Description: " << flake.description << "\n"; - std::cout << "Location: " << flake.path << "\n"; - } + printFlakeInfo(flake, json); } }; @@ -218,6 +264,7 @@ struct CmdFlake : virtual MultiCommand, virtual Command : MultiCommand({make_ref() , make_ref() , make_ref() + , make_ref() , make_ref() , make_ref() , make_ref() From 641db127be9df82fe4d51290120a8ba6d0b5f4fd Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Thu, 21 Mar 2019 09:30:16 +0100 Subject: [PATCH 042/634] FlakeIds are now properly looked up in registries --- src/libexpr/primops/flake.cc | 23 +++++++++++++---------- src/libexpr/primops/flake.hh | 2 +- src/libexpr/primops/flakeref.hh | 18 ++++++++++++++++++ src/nix/build.cc | 7 ++++--- 4 files changed, 36 insertions(+), 14 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index c4ae29022..70d1b871a 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -123,6 +123,10 @@ static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, auto newRef = FlakeRef(i->second.ref); if (!newRef.isDirect()) throw Error("found indirect flake URI '%s' in the flake registry", i->second.ref.to_string()); + if (refData->ref) + newRef.setRef(*refData->ref); + if (refData->rev) + newRef.setRev(*refData->rev); return newRef; } } @@ -224,7 +228,6 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) flake.path = flakePath; flake.revCount = sourceInfo.revCount; - flake.path = flakePath; Value vInfo; state.evalFile(flakePath + "/flake.nix", vInfo); // FIXME: symlink attack @@ -317,9 +320,9 @@ Dependencies resolveFlake(EvalState & state, const FlakeRef & topRef, bool impur auto flakeRef = todo.front(); todo.pop(); - if (auto refData = std::get_if(&flakeRef.data)) { - if (done.count(refData->id)) continue; // optimization + if (std::get_if(&flakeRef.data)) flakeRef = lookupFlake(state, flakeRef, registries); + if (evalSettings.pureEval && !flakeRef.isImmutable() && (!isTopLevel || !impureTopRef)) throw Error("mutable flake '%s' is not allowed in pure mode; use --impure to disable", flakeRef.to_string()); @@ -368,7 +371,7 @@ FlakeRegistry updateLockFile(EvalState & evalState, FlakeRef & flakeRef) return newLockFile; } -void updateLockFile(EvalState & state, std::string path) +void updateLockFile(EvalState & state, Path path) { // 'path' is the path to the local flake repo. FlakeRef flakeRef = FlakeRef("file://" + path); @@ -384,7 +387,7 @@ void updateLockFile(EvalState & state, std::string path) // Return the `provides` of the top flake, while assigning to `v` the provides // of the dependencies as well. 
-Value * makeFlakeValue(EvalState & state, FlakeUri flakeUri, bool impureTopRef, Value & v) +Value * makeFlakeValue(EvalState & state, FlakeUri flakeUri, Value & v) { FlakeRef flakeRef = FlakeRef(flakeUri); @@ -406,16 +409,16 @@ Value * makeFlakeValue(EvalState & state, FlakeUri flakeUri, bool impureTopRef, state.mkAttrs(*vFlake, 4); - mkString(*state.allocAttr(*vFlake, state.sDescription), flake.second.description); + mkString(*state.allocAttr(*vFlake, state.sDescription), flake.description); - state.store->assertStorePath(flake.second.path); - mkString(*state.allocAttr(*vFlake, state.sOutPath), flake.second.path, {flake.second.path}); + state.store->assertStorePath(flake.path); + mkString(*state.allocAttr(*vFlake, state.sOutPath), flake.path, {flake.path}); if (flake.second.revCount) - mkInt(*state.allocAttr(*vFlake, state.symbols.create("revCount")), *flake.second.revCount); + mkInt(*state.allocAttr(*vFlake, state.symbols.create("revCount")), *flake.revCount); auto vProvides = state.allocAttr(*vFlake, state.symbols.create("provides")); - mkApp(*vProvides, *flake.second.vProvides, *vResult); + mkApp(*vProvides, *flake.vProvides, *vResult); // Should this be vResult or vFlake??? Or both! vFlake->attrs->sort(); } diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index ffd962561..a8f907784 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -68,5 +68,5 @@ Dependencies resolveFlake(EvalState &, const FlakeRef &, bool impureTopRef); FlakeRegistry updateLockFile(EvalState &, Flake &); -void updateLockFile(EvalState &, std::string); +void updateLockFile(EvalState &, Path); } diff --git a/src/libexpr/primops/flakeref.hh b/src/libexpr/primops/flakeref.hh index fa14f7c25..94a75fb2b 100644 --- a/src/libexpr/primops/flakeref.hh +++ b/src/libexpr/primops/flakeref.hh @@ -160,5 +160,23 @@ struct FlakeRef bool isImmutable() const; FlakeRef baseRef() const; + + void setRef(std::optional ref) { + if (auto refData = std::get_if(&data)) + refData->ref = ref; + else if (auto refData = std::get_if(&data)) + refData->ref = ref; + else if (auto refData = std::get_if(&data)) + refData->ref = ref; + } + + void setRev(std::optional rev) { + if (auto refData = std::get_if(&data)) + refData->rev = rev; + else if (auto refData = std::get_if(&data)) + refData->rev = rev; + else if (auto refData = std::get_if(&data)) + refData->rev = rev; + } }; } diff --git a/src/nix/build.cc b/src/nix/build.cc index a6fcf5094..f6908b0c0 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -78,10 +78,11 @@ struct CmdBuild : MixDryRun, InstallablesCommand } } + std::string flakeUri = ""; if(updateLock) - for (int i = 0; i < installables.size(); i++) - if (auto flakeUri = installableToFlakeUri) - updateLockFile(*evalState, FlakeRef(*flakeUri)); + for (uint i = 0; i < installables.size(); i++) + // if (auto flakeUri = installableToFlakeUri) + updateLockFile(*evalState, flakeUri); } }; From 3ec0c82fab94533807c5c3bb25df2b43d8339ed3 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Fri, 29 Mar 2019 16:18:25 +0100 Subject: [PATCH 043/634] Fixed dependency resolution --- src/libexpr/primops/flake.cc | 204 +++++++++++++++++++++-------------- src/libexpr/primops/flake.hh | 31 ++++-- src/nix/build.cc | 10 +- src/nix/flake.cc | 25 +++-- 4 files changed, 168 insertions(+), 102 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 70d1b871a..1ad9ad3f8 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -39,9 +39,8 @@ 
std::shared_ptr readRegistry(const Path & path) /* Write the registry or lock file to a file. */ void writeRegistry(FlakeRegistry registry, Path path) { - nlohmann::json json = {}; + nlohmann::json json; json["version"] = 1; - json["flakes"] = {}; for (auto elem : registry.entries) { json["flakes"][elem.first] = { {"uri", elem.second.ref.to_string()} }; } @@ -49,6 +48,85 @@ void writeRegistry(FlakeRegistry registry, Path path) writeFile(path, json.dump(4)); // The '4' is the number of spaces used in the indentation in the json file. } +LockFile::FlakeEntry readFlakeEntry(nlohmann::json json) +{ + FlakeRef flakeRef(json["uri"]); + if (!flakeRef.isImmutable()) + throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef.to_string()); + + LockFile::FlakeEntry entry(flakeRef); + + auto nonFlakeRequires = json["nonFlakeRequires"]; + + for (auto i = nonFlakeRequires.begin(); i != nonFlakeRequires.end(); ++i) { + FlakeRef flakeRef(i->value("uri", "")); + if (!flakeRef.isImmutable()) + throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef.to_string()); + entry.nonFlakeEntries.insert_or_assign(i.key(), flakeRef); + } + + auto requires = json["requires"]; + + for (auto i = requires.begin(); i != requires.end(); ++i) + entry.flakeEntries.insert_or_assign(i.key(), readFlakeEntry(*i)); + + return entry; +} + +LockFile readLockFile(const Path & path) +{ + LockFile lockFile; + + if (!pathExists(path)) + return lockFile; + + auto json = nlohmann::json::parse(readFile(path)); + + auto version = json.value("version", 0); + if (version != 1) + throw Error("lock file '%s' has unsupported version %d", path, version); + + auto nonFlakeRequires = json["nonFlakeRequires"]; + + for (auto i = nonFlakeRequires.begin(); i != nonFlakeRequires.end(); ++i) { + FlakeRef flakeRef(i->value("uri", "")); + if (!flakeRef.isImmutable()) + throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef.to_string()); + lockFile.nonFlakeEntries.insert_or_assign(i.key(), flakeRef); + } + + auto requires = json["requires"]; + + for (auto i = requires.begin(); i != requires.end(); ++i) + lockFile.flakeEntries.insert_or_assign(i.key(), readFlakeEntry(*i)); + + return lockFile; +} + +nlohmann::json flakeEntryToJson(LockFile::FlakeEntry & entry) +{ + nlohmann::json json; + json["uri"] = entry.ref.to_string(); + for (auto & x : entry.nonFlakeEntries) + json["nonFlakeRequires"][x.first]["uri"] = x.second.to_string(); + for (auto & x : entry.flakeEntries) + json["requires"][x.first] = flakeEntryToJson(x.second); + return json; +} + +void writeLockFile(LockFile lockFile, Path path) +{ + nlohmann::json json; + json["version"] = 1; + json["nonFlakeRequires"]; + for (auto & x : lockFile.nonFlakeEntries) + json["nonFlakeRequires"][x.first]["uri"] = x.second.to_string(); + for (auto & x : lockFile.flakeEntries) + json["requires"][x.first] = flakeEntryToJson(x.second); + createDirs(dirOf(path)); + writeFile(path, json.dump(4)); // '4' = indentation in json file +} + Path getUserRegistryPath() { return getHome() + "/.config/nix/registry.json"; @@ -142,17 +220,13 @@ struct FlakeSourceInfo std::optional revCount; }; -static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef) +static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowed = false) { - FlakeRef directFlakeRef = FlakeRef(flakeRef); - if (!flakeRef.isDirect()) { - directFlakeRef = lookupFlake(state, flakeRef, state.getFlakeRegistries()); - } - 
assert(directFlakeRef.isDirect()); - // NOTE FROM NICK: I don't see why one wouldn't fetch FlakeId flakes.. + FlakeRef directFlakeRef = lookupFlake(state, flakeRef, state.getFlakeRegistries()); if (auto refData = std::get_if(&directFlakeRef.data)) { - // FIXME: require hash in pure mode. + if (evalSettings.pureEval && !impureIsAllowed && !directFlakeRef.isImmutable()) + throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", directFlakeRef.to_string()); // FIXME: use regular /archive URLs instead? api.github.com // might have stricter rate limits. @@ -207,7 +281,8 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef) else abort(); } -Flake getFlake(EvalState & state, const FlakeRef & flakeRef) +// This will return the flake which corresponds to a given FlakeRef. The lookupFlake is done within this function. +Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowed = false) { FlakeSourceInfo sourceInfo = fetchFlake(state, flakeRef); debug("got flake source '%s' with revision %s", @@ -264,14 +339,9 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) } else throw Error("flake lacks attribute 'provides'"); - auto lockFile = flakePath + "/flake.lock"; // FIXME: symlink attack - - flake.lockFile = readRegistry(lockFile); - for (auto & entry : flake.lockFile->entries) - if (!entry.second.ref.isImmutable()) - throw Error("flake lock file '%s' contains mutable entry '%s'", - lockFile, entry.second.ref.to_string()); + const Path lockFile = flakePath + "/flake.lock"; // FIXME: symlink attack + flake.lockFile = readLockFile(lockFile); return flake; } @@ -307,77 +377,50 @@ NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeId flake dependencies. FIXME: this should return a graph of flakes. */ -Dependencies resolveFlake(EvalState & state, const FlakeRef & topRef, bool impureTopRef) +Dependencies resolveFlake(EvalState & state, const FlakeRef & topRef, bool impureTopRef, bool isTopFlake = true) { - Dependencies deps; - std::queue todo; - bool isTopLevel = true; - todo.push(topRef); + Flake flake = getFlake(state, topRef, isTopFlake && impureTopRef); + Dependencies deps(flake); - auto registries = state.getFlakeRegistries(); + for (auto & nonFlakeInfo : flake.nonFlakeRequires) + deps.nonFlakeDeps.push_back(getNonFlake(state, nonFlakeInfo.second, nonFlakeInfo.first)); - while (!todo.empty()) { - auto flakeRef = todo.front(); - todo.pop(); - - if (std::get_if(&flakeRef.data)) - flakeRef = lookupFlake(state, flakeRef, registries); - - if (evalSettings.pureEval && !flakeRef.isImmutable() && (!isTopLevel || !impureTopRef)) - throw Error("mutable flake '%s' is not allowed in pure mode; use --impure to disable", flakeRef.to_string()); - - auto flake = getFlake(state, flakeRef); - - if (isTopLevel) { - deps.topFlakeId = flake.id; - isTopLevel = false; - } - - for (auto & flakeRef : flake.requires) - todo.push(flakeRef); - - for (auto & x : flake.nonFlakeRequires) - deps.nonFlakes.push_back(getNonFlake(state, x.second, x.first)); - // TODO (Nick): If there are 2 non-flake dependencies with the same - // FlakeId, this will lead to trouble! One of the dependencies won't - // be used! 
- - deps.flakes.push_back(flake); - } + for (auto & newFlakeRef : flake.requires) + deps.flakeDeps.push_back(resolveFlake(state, newFlakeRef, impureTopRef, false)); return deps; } -FlakeRegistry updateLockFile(EvalState & evalState, FlakeRef & flakeRef) +LockFile::FlakeEntry dependenciesToFlakeEntry(Dependencies & deps) { - FlakeRegistry newLockFile; - Dependencies deps = resolveFlake(evalState, flakeRef, false); - // Nick assumed that "topRefPure" means that the Flake for flakeRef can be - // fetched purely. - for (auto const& require : deps.flakes) { - FlakeRegistry::Entry entry = FlakeRegistry::Entry(require.ref); - // The FlakeRefs are immutable because they come out of the Flake objects. - if (require.id != deps.topFlakeId) - newLockFile.entries.insert_or_assign(require.id, entry); - // TODO (Nick): If there are 2 flake dependencies with the same FlakeId, - // one of them gets ignored! - } - for (auto const& nonFlake : deps.nonFlakes) { - FlakeRegistry::Entry entry = FlakeRegistry::Entry(nonFlake.ref); - newLockFile.entries.insert_or_assign(nonFlake.id, entry); - // We are assuming the sets of FlakeIds for flakes and non-flakes - // are disjoint. - } - return newLockFile; + LockFile::FlakeEntry entry(deps.flake.ref); + + for (Dependencies & deps : deps.flakeDeps) + entry.flakeEntries.insert_or_assign(deps.flake.id, dependenciesToFlakeEntry(deps)); + + for (NonFlake & nonFlake : deps.nonFlakeDeps) + entry.nonFlakeEntries.insert_or_assign(nonFlake.id, nonFlake.ref); + + return entry; } -void updateLockFile(EvalState & state, Path path) +LockFile getLockFile(EvalState & evalState, FlakeRef & flakeRef, bool impureTopRef) +{ + Dependencies deps = resolveFlake(evalState, flakeRef, impureTopRef); + LockFile::FlakeEntry entry = dependenciesToFlakeEntry(deps); + LockFile lockFile; + lockFile.flakeEntries = entry.flakeEntries; + lockFile.nonFlakeEntries = entry.nonFlakeEntries; + return lockFile; +} + +void updateLockFile(EvalState & state, Path path, bool impureTopRef) { // 'path' is the path to the local flake repo. FlakeRef flakeRef = FlakeRef("file://" + path); if (std::get_if(&flakeRef.data)) { - FlakeRegistry newLockFile = updateLockFile(state, flakeRef); - writeRegistry(newLockFile, path + "/flake.lock"); + LockFile lockFile = getLockFile(state, flakeRef, impureTopRef); + writeLockFile(lockFile, path + "/flake.lock"); } else if (std::get_if(&flakeRef.data)) { throw UsageError("you can only update local flakes, not flakes on GitHub"); } else { @@ -393,19 +436,19 @@ Value * makeFlakeValue(EvalState & state, FlakeUri flakeUri, Value & v) Dependencies deps = resolveFlake(state, flakeRef, impure); - // FIXME: we should call each flake with only its dependencies - // (rather than the closure of the top-level flake). + // // FIXME: we should call each flake with only its dependencies + // // (rather than the closure of the top-level flake). auto vResult = state.allocValue(); // This will store the attribute set of the `nonFlakeRequires` and the `requires.provides`. 
- state.mkAttrs(*vResult, dependencies.flakes.size()); + state.mkAttrs(*vResult, deps.flakeDeps.size()); Value * vTop = 0; - for (auto & flake : deps.flakes) { + for (auto & flake : deps.flakeDeps) { auto vFlake = state.allocAttr(*vResult, flake.id); - if (topFlakeId == flake.second.id) vTop = vFlake; + if (deps.topFlakeId == flake.id) vTop = vFlake; state.mkAttrs(*vFlake, 4); @@ -431,6 +474,7 @@ Value * makeFlakeValue(EvalState & state, FlakeUri flakeUri, Value & v) return vTop; } +// This function is exposed to be used in nix files. static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) { makeFlakeValue(state, state.forceStringNoCtx(*args[0], pos), false, v); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index a8f907784..019688f37 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -19,6 +19,20 @@ struct FlakeRegistry std::map entries; }; +struct LockFile +{ + struct FlakeEntry + { + FlakeRef ref; + std::map flakeEntries; + std::map nonFlakeEntries; + FlakeEntry(const FlakeRef & flakeRef) : ref(flakeRef) {}; + }; + + std::map flakeEntries; + std::map nonFlakeEntries; +}; + Path getUserRegistryPath(); Value * makeFlakeRegistryValue(EvalState & state); @@ -37,7 +51,7 @@ struct Flake Path path; std::optional revCount; std::vector requires; - std::shared_ptr lockFile; + LockFile lockFile; std::map nonFlakeRequires; Value * vProvides; // FIXME: gc // date @@ -55,18 +69,17 @@ struct NonFlake NonFlake(const FlakeRef flakeRef) : ref(flakeRef) {}; }; -Flake getFlake(EvalState &, const FlakeRef &); +Flake getFlake(EvalState &, const FlakeRef &, bool impureIsAllowed); struct Dependencies { - FlakeId topFlakeId; - std::vector flakes; - std::vector nonFlakes; + Flake flake; + std::vector flakeDeps; // The flake dependencies + std::vector nonFlakeDeps; + Dependencies(const Flake & flake) : flake(flake) {} }; -Dependencies resolveFlake(EvalState &, const FlakeRef &, bool impureTopRef); +Dependencies resolveFlake(EvalState &, const FlakeRef &, bool impureTopRef, bool isTopFlake); -FlakeRegistry updateLockFile(EvalState &, Flake &); - -void updateLockFile(EvalState &, Path); +void updateLockFile(EvalState &, Path path, bool impureTopRef); } diff --git a/src/nix/build.cc b/src/nix/build.cc index f6908b0c0..226c21e9e 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -78,11 +78,11 @@ struct CmdBuild : MixDryRun, InstallablesCommand } } - std::string flakeUri = ""; - if(updateLock) - for (uint i = 0; i < installables.size(); i++) - // if (auto flakeUri = installableToFlakeUri) - updateLockFile(*evalState, flakeUri); + // std::string flakeUri = ""; + // if(updateLock) + // for (uint i = 0; i < installables.size(); i++) + // // if (auto flakeUri = installableToFlakeUri) + // updateLockFile(*evalState, flakeUri, true); } }; diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 07d31c45a..ff291aa80 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -5,6 +5,7 @@ #include "progress-bar.hh" #include "eval.hh" #include +#include using namespace nix; @@ -80,13 +81,21 @@ struct CmdFlakeDeps : FlakeCommand, MixJSON, StoreCommand, MixEvalArgs FlakeRef flakeRef(flakeUri); - Dependencies deps = resolveFlake(*evalState, flakeRef, true); + Dependencies deps = resolveFlake(*evalState, flakeRef, true, true); - for (auto & flake : deps.flakes) - printFlakeInfo(flake, json); + std::queue todo; + todo.push(deps); - for (auto & nonFlake : deps.nonFlakes) - printNonFlakeInfo(nonFlake, json); + while (!todo.empty()) { + deps = 
todo.front(); + todo.pop(); + + for (auto & nonFlake : deps.nonFlakeDeps) + printNonFlakeInfo(nonFlake, json); + + for (auto & newDeps : deps.flakeDeps) + todo.push(newDeps); + } } }; @@ -107,7 +116,7 @@ struct CmdFlakeUpdate : StoreCommand, GitRepoCommand, MixEvalArgs auto evalState = std::make_shared(searchPath, store); if (gitPath == "") gitPath = absPath("."); - updateLockFile(*evalState, gitPath); + updateLockFile(*evalState, gitPath, true); } }; @@ -126,7 +135,7 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON, MixEvalArgs, StoreCommand void run(nix::ref store) override { auto evalState = std::make_shared(searchPath, store); - nix::Flake flake = nix::getFlake(*evalState, FlakeRef(flakeUri)); + nix::Flake flake = nix::getFlake(*evalState, FlakeRef(flakeUri), true); printFlakeInfo(flake, json); } }; @@ -220,7 +229,7 @@ struct CmdFlakePin : virtual Args, StoreCommand, MixEvalArgs auto it = userRegistry.entries.find(flakeId); if (it != userRegistry.entries.end()) { FlakeRef oldRef = it->second.ref; - it->second.ref = getFlake(*evalState, oldRef).ref; + it->second.ref = getFlake(*evalState, oldRef, true).ref; // The 'ref' in 'flake' is immutable. writeRegistry(userRegistry, userRegistryPath); } else From f39670c6318ba8d2260b3ac54f46161d74649266 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Sat, 6 Apr 2019 20:45:35 +0200 Subject: [PATCH 044/634] Took ref and rev out of FlakeRef --- src/libexpr/primops/flake.cc | 31 +++++++-------- src/libexpr/primops/flakeref.cc | 69 +++++++++++---------------------- src/libexpr/primops/flakeref.hh | 28 +++---------- 3 files changed, 41 insertions(+), 87 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 1ad9ad3f8..cea3854e4 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -201,10 +201,8 @@ static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, auto newRef = FlakeRef(i->second.ref); if (!newRef.isDirect()) throw Error("found indirect flake URI '%s' in the flake registry", i->second.ref.to_string()); - if (refData->ref) - newRef.setRef(*refData->ref); - if (refData->rev) - newRef.setRev(*refData->rev); + if (flakeRef.ref) newRef.setRef(*flakeRef.ref); + if (flakeRef.rev) newRef.setRev(*flakeRef.rev); return newRef; } } @@ -220,13 +218,13 @@ struct FlakeSourceInfo std::optional revCount; }; -static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowed = false) +static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef flakeRef, bool impureIsAllowed = false) { - FlakeRef directFlakeRef = lookupFlake(state, flakeRef, state.getFlakeRegistries()); + FlakeRef fRef = lookupFlake(state, flakeRef, state.getFlakeRegistries()); - if (auto refData = std::get_if(&directFlakeRef.data)) { - if (evalSettings.pureEval && !impureIsAllowed && !directFlakeRef.isImmutable()) - throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", directFlakeRef.to_string()); + if (auto refData = std::get_if(&fRef.data)) { + if (evalSettings.pureEval && !impureIsAllowed && !fRef.isImmutable()) + throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", fRef.to_string()); // FIXME: use regular /archive URLs instead? api.github.com // might have stricter rate limits. @@ -235,14 +233,11 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, auto url = fmt("https://api.github.com/repos/%s/%s/tarball/%s", refData->owner, refData->repo, - refData->rev - ? 
refData->rev->to_string(Base16, false) - : refData->ref - ? *refData->ref - : "master"); + fRef.rev ? fRef.rev->to_string(Base16, false) + : fRef.ref ? *fRef.ref : "master"); auto result = getDownloader()->downloadCached(state.store, url, true, "source", - Hash(), nullptr, refData->rev ? 1000000000 : settings.tarballTtl); + Hash(), nullptr, fRef.rev ? 1000000000 : settings.tarballTtl); if (!result.etag) throw Error("did not receive an ETag header from '%s'", url); @@ -257,9 +252,9 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, return info; } - else if (auto refData = std::get_if(&directFlakeRef.data)) { - auto gitInfo = exportGit(state.store, refData->uri, refData->ref, - refData->rev ? refData->rev->to_string(Base16, false) : "", "source"); + else if (auto refData = std::get_if(&fRef.data)) { + auto gitInfo = exportGit(state.store, refData->uri, fRef.ref, + fRef.rev ? fRef.rev->to_string(Base16, false) : "", "source"); FlakeSourceInfo info; info.storePath = gitInfo.storePath; info.rev = Hash(gitInfo.rev, htSHA1); diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index 1df53bfb8..f160b257b 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -58,11 +58,11 @@ FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) IsFlakeId d; d.id = match[1]; if (match[2].matched) - d.rev = Hash(match[2], htSHA1); + rev = Hash(match[2], htSHA1); else if (match[3].matched) { - d.ref = match[3]; + ref = match[3]; if (match[4].matched) - d.rev = Hash(match[4], htSHA1); + rev = Hash(match[4], htSHA1); } data = d; } @@ -72,9 +72,9 @@ FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) d.owner = match[1]; d.repo = match[2]; if (match[3].matched) - d.rev = Hash(match[3], htSHA1); + rev = Hash(match[3], htSHA1); else if (match[4].matched) { - d.ref = match[4]; + ref = match[4]; } data = d; } @@ -92,16 +92,16 @@ FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) if (name == "rev") { if (!std::regex_match(value, revRegex)) throw Error("invalid Git revision '%s'", value); - d.rev = Hash(value, htSHA1); + rev = Hash(value, htSHA1); } else if (name == "ref") { if (!std::regex_match(value, refRegex2)) throw Error("invalid Git ref '%s'", value); - d.ref = value; + ref = value; } else // FIXME: should probably pass through unknown parameters throw Error("invalid Git flake reference parameter '%s', in '%s'", name, uri); } - if (d.rev && !d.ref) + if (rev && !ref) throw Error("flake URI '%s' lacks a Git ref", uri); data = d; } @@ -118,27 +118,18 @@ FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) std::string FlakeRef::to_string() const { - if (auto refData = std::get_if(&data)) { - return - "flake:" + refData->id + - (refData->ref ? "/" + *refData->ref : "") + - (refData->rev ? "/" + refData->rev->to_string(Base16, false) : ""); - } + std::string string; + if (auto refData = std::get_if(&data)) + string = "flake:" + refData->id; else if (auto refData = std::get_if(&data)) { - assert(!refData->ref || !refData->rev); - return - "github:" + refData->owner + "/" + refData->repo + - (refData->ref ? "/" + *refData->ref : "") + - (refData->rev ? "/" + refData->rev->to_string(Base16, false) : ""); + assert(!ref || !rev); + string = "github:" + refData->owner + "/" + refData->repo; } else if (auto refData = std::get_if(&data)) { - assert(refData->ref || !refData->rev); - return - refData->uri + - (refData->ref ? "?ref=" + *refData->ref : "") + - (refData->rev ? 
"&rev=" + refData->rev->to_string(Base16, false) : ""); + assert(ref || !rev); + string = refData->uri; } else if (auto refData = std::get_if(&data)) { @@ -146,38 +137,22 @@ std::string FlakeRef::to_string() const } else abort(); + + string += (ref ? "/" + *ref : "") + + (rev ? "/" + rev->to_string(Base16, false) : ""); + return string; } bool FlakeRef::isImmutable() const { - if (auto refData = std::get_if(&data)) - return (bool) refData->rev; - - else if (auto refData = std::get_if(&data)) - return (bool) refData->rev; - - else if (auto refData = std::get_if(&data)) - return (bool) refData->rev; - - else if (std::get_if(&data)) - return false; - - else abort(); + return (bool) rev; } FlakeRef FlakeRef::baseRef() const // Removes the ref and rev from a FlakeRef. { FlakeRef result(*this); - if (auto refData = std::get_if(&result.data)) { - refData->ref = std::nullopt; - refData->rev = std::nullopt; - } else if (auto refData = std::get_if(&result.data)) { - refData->ref = std::nullopt; - refData->rev = std::nullopt; - } else if (auto refData = std::get_if(&result.data)) { - refData->ref = std::nullopt; - refData->rev = std::nullopt; - } + result.ref = std::nullopt; + result.rev = std::nullopt; return result; } } diff --git a/src/libexpr/primops/flakeref.hh b/src/libexpr/primops/flakeref.hh index 94a75fb2b..9276fc737 100644 --- a/src/libexpr/primops/flakeref.hh +++ b/src/libexpr/primops/flakeref.hh @@ -101,25 +101,23 @@ typedef std::string FlakeId; struct FlakeRef { + std::optional ref; + std::optional rev; + struct IsFlakeId { FlakeId id; - std::optional ref; - std::optional rev; }; struct IsGitHub { std::string owner, repo; - std::optional ref; - std::optional rev; }; + // Git, Tarball struct IsGit { std::string uri; - std::optional ref; - std::optional rev; }; struct IsPath @@ -161,22 +159,8 @@ struct FlakeRef FlakeRef baseRef() const; - void setRef(std::optional ref) { - if (auto refData = std::get_if(&data)) - refData->ref = ref; - else if (auto refData = std::get_if(&data)) - refData->ref = ref; - else if (auto refData = std::get_if(&data)) - refData->ref = ref; - } + void setRef(std::optional ref) { ref = ref; } - void setRev(std::optional rev) { - if (auto refData = std::get_if(&data)) - refData->rev = rev; - else if (auto refData = std::get_if(&data)) - refData->rev = rev; - else if (auto refData = std::get_if(&data)) - refData->rev = rev; - } + void setRev(std::optional rev) { rev = rev; } }; } From c64f98b883515df70e2457ae01070b5af9ae69b9 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Thu, 21 Mar 2019 09:30:16 +0100 Subject: [PATCH 045/634] FlakeAlias is implemented --- src/libexpr/primops/flake.cc | 104 ++++++++++++++++++++++++++++---- src/libexpr/primops/flake.hh | 10 +-- src/libexpr/primops/flakeref.cc | 15 +++-- src/libexpr/primops/flakeref.hh | 8 ++- src/nix/build.cc | 2 +- src/nix/flake.cc | 36 +++++------ 6 files changed, 129 insertions(+), 46 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index cea3854e4..729b1da95 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -127,6 +127,86 @@ void writeLockFile(LockFile lockFile, Path path) writeFile(path, json.dump(4)); // '4' = indentation in json file } +Path getUserRegistryPath() +>>>>>>> Fixed dependency resolution +{ + FlakeRef flakeRef(json["uri"]); + if (!flakeRef.isImmutable()) + throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef.to_string()); + + LockFile::FlakeEntry entry(flakeRef); + + auto nonFlakeRequires = 
json["nonFlakeRequires"]; + + for (auto i = nonFlakeRequires.begin(); i != nonFlakeRequires.end(); ++i) { + FlakeRef flakeRef(i->value("uri", "")); + if (!flakeRef.isImmutable()) + throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef.to_string()); + entry.nonFlakeEntries.insert_or_assign(i.key(), flakeRef); + } + + auto requires = json["requires"]; + + for (auto i = requires.begin(); i != requires.end(); ++i) + entry.flakeEntries.insert_or_assign(i.key(), readFlakeEntry(*i)); + + return entry; +} + +LockFile readLockFile(const Path & path) +{ + LockFile lockFile; + + if (!pathExists(path)) + return lockFile; + + auto json = nlohmann::json::parse(readFile(path)); + + auto version = json.value("version", 0); + if (version != 1) + throw Error("lock file '%s' has unsupported version %d", path, version); + + auto nonFlakeRequires = json["nonFlakeRequires"]; + + for (auto i = nonFlakeRequires.begin(); i != nonFlakeRequires.end(); ++i) { + FlakeRef flakeRef(i->value("uri", "")); + if (!flakeRef.isImmutable()) + throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef.to_string()); + lockFile.nonFlakeEntries.insert_or_assign(i.key(), flakeRef); + } + + auto requires = json["requires"]; + + for (auto i = requires.begin(); i != requires.end(); ++i) + lockFile.flakeEntries.insert_or_assign(i.key(), readFlakeEntry(*i)); + + return lockFile; +} + +nlohmann::json flakeEntryToJson(LockFile::FlakeEntry & entry) +{ + nlohmann::json json; + json["uri"] = entry.ref.to_string(); + for (auto & x : entry.nonFlakeEntries) + json["nonFlakeRequires"][x.first]["uri"] = x.second.to_string(); + for (auto & x : entry.flakeEntries) + json["requires"][x.first] = flakeEntryToJson(x.second); + return json; +} + +void writeLockFile(LockFile lockFile, Path path) +{ + nlohmann::json json; + json["version"] = 1; + json["nonFlakeRequires"]; + for (auto & x : lockFile.nonFlakeEntries) + json["nonFlakeRequires"][x.first]["uri"] = x.second.to_string(); + for (auto & x : lockFile.flakeEntries) + json["requires"][x.first] = flakeEntryToJson(x.second); + createDirs(dirOf(path)); + writeFile(path, json.dump(4)); // '4' = indentation in json file +} + Path getUserRegistryPath() { return getHome() + "/.config/nix/registry.json"; @@ -194,9 +274,9 @@ Value * makeFlakeRegistryValue(EvalState & state) static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, std::vector> registries) { - if (auto refData = std::get_if(&flakeRef.data)) { + if (auto refData = std::get_if(&flakeRef.data)) { for (auto registry : registries) { - auto i = registry->entries.find(refData->id); + auto i = registry->entries.find(refData->alias); if (i != registry->entries.end()) { auto newRef = FlakeRef(i->second.ref); if (!newRef.isDirect()) @@ -206,7 +286,7 @@ static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, return newRef; } } - throw Error("cannot find flake '%s' in the flake registry or in the flake lock file", refData->id); + throw Error("cannot find flake with alias '%s' in the flake registry or in the flake lock file", refData->alias); } else return flakeRef; } @@ -342,7 +422,7 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe } // Get the `NonFlake` corresponding to a `FlakeRef`. 
-NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeId flakeId) +NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeAlias alias) { FlakeSourceInfo sourceInfo = fetchFlake(state, flakeRef); debug("got non-flake source '%s' with revision %s", @@ -363,7 +443,7 @@ NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeId flake nonFlake.path = flakePath; - nonFlake.id = flakeId; + nonFlake.alias = alias; return nonFlake; } @@ -394,14 +474,14 @@ LockFile::FlakeEntry dependenciesToFlakeEntry(Dependencies & deps) entry.flakeEntries.insert_or_assign(deps.flake.id, dependenciesToFlakeEntry(deps)); for (NonFlake & nonFlake : deps.nonFlakeDeps) - entry.nonFlakeEntries.insert_or_assign(nonFlake.id, nonFlake.ref); + entry.nonFlakeEntries.insert_or_assign(nonFlake.alias, nonFlake.ref); return entry; } -LockFile getLockFile(EvalState & evalState, FlakeRef & flakeRef, bool impureTopRef) +LockFile getLockFile(EvalState & evalState, FlakeRef & flakeRef) { - Dependencies deps = resolveFlake(evalState, flakeRef, impureTopRef); + Dependencies deps = resolveFlake(evalState, flakeRef, true); LockFile::FlakeEntry entry = dependenciesToFlakeEntry(deps); LockFile lockFile; lockFile.flakeEntries = entry.flakeEntries; @@ -409,17 +489,17 @@ LockFile getLockFile(EvalState & evalState, FlakeRef & flakeRef, bool impureTopR return lockFile; } -void updateLockFile(EvalState & state, Path path, bool impureTopRef) +void updateLockFile(EvalState & state, Path path) { // 'path' is the path to the local flake repo. FlakeRef flakeRef = FlakeRef("file://" + path); if (std::get_if(&flakeRef.data)) { - LockFile lockFile = getLockFile(state, flakeRef, impureTopRef); + LockFile lockFile = getLockFile(state, flakeRef); writeLockFile(lockFile, path + "/flake.lock"); } else if (std::get_if(&flakeRef.data)) { throw UsageError("you can only update local flakes, not flakes on GitHub"); } else { - throw UsageError("you can only update local flakes, not flakes through their FlakeId"); + throw UsageError("you can only update local flakes, not flakes through their FlakeAlias"); } } @@ -456,7 +536,7 @@ Value * makeFlakeValue(EvalState & state, FlakeUri flakeUri, Value & v) mkInt(*state.allocAttr(*vFlake, state.symbols.create("revCount")), *flake.revCount); auto vProvides = state.allocAttr(*vFlake, state.symbols.create("provides")); - mkApp(*vProvides, *flake.vProvides, *vResult); // Should this be vResult or vFlake??? Or both! 
+ mkApp(*vProvides, *flake.vProvides, *vResult); vFlake->attrs->sort(); } diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 019688f37..adf8b07af 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -16,7 +16,7 @@ struct FlakeRegistry Entry(const FlakeRef & flakeRef) : ref(flakeRef) {}; Entry operator=(const Entry & entry) { return Entry(entry.ref); } }; - std::map entries; + std::map entries; }; struct LockFile @@ -52,7 +52,7 @@ struct Flake std::optional revCount; std::vector requires; LockFile lockFile; - std::map nonFlakeRequires; + std::map nonFlakeRequires; Value * vProvides; // FIXME: gc // date // content hash @@ -61,7 +61,7 @@ struct Flake struct NonFlake { - FlakeId id; + FlakeAlias alias; FlakeRef ref; Path path; // date @@ -81,5 +81,7 @@ struct Dependencies Dependencies resolveFlake(EvalState &, const FlakeRef &, bool impureTopRef, bool isTopFlake); -void updateLockFile(EvalState &, Path path, bool impureTopRef); +FlakeRegistry updateLockFile(EvalState &, Flake &); + +void updateLockFile(EvalState &, Path path); } diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index f160b257b..ab1e5e152 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -19,7 +19,7 @@ const static std::string revOrRefRegex = "(?:(" + revRegexS + ")|(" + refRegex + // "master/e72daba8250068216d79d2aeef40d4d95aff6666"). const static std::string refAndOrRevRegex = "(?:(" + revRegexS + ")|(?:(" + refRegex + ")(?:/(" + revRegexS + "))?))"; -const static std::string flakeId = "[a-zA-Z][a-zA-Z0-9_-]*"; +const static std::string flakeAlias = "[a-zA-Z][a-zA-Z0-9_-]*"; // GitHub references. const static std::string ownerRegex = "[a-zA-Z][a-zA-Z0-9_-]*"; @@ -37,7 +37,7 @@ FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) // FIXME: could combine this into one regex. 
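// A few illustrative flake URIs that the patterns below are intended to
// accept (hypothetical examples, shown only to make the regexes concrete):
//
//   nixpkgs                          alias, resolved through a registry
//   nixpkgs/master                   alias plus a Git ref
//   github:NixOS/nixpkgs/master      GitHub owner/repo plus a ref (or rev)
//   file:///path/to/repo?ref=master  Git URI; 'ref'/'rev' are parsed as parameters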
static std::regex flakeRegex( - "(?:flake:)?(" + flakeId + ")(?:/(?:" + refAndOrRevRegex + "))?", + "(?:flake:)?(" + flakeAlias + ")(?:/(?:" + refAndOrRevRegex + "))?", std::regex::ECMAScript); static std::regex githubRegex( @@ -55,8 +55,8 @@ FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) std::cmatch match; if (std::regex_match(uri.c_str(), match, flakeRegex)) { - IsFlakeId d; - d.id = match[1]; + IsAlias d; + d.alias = match[1]; if (match[2].matched) rev = Hash(match[2], htSHA1); else if (match[3].matched) { @@ -119,8 +119,8 @@ FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) std::string FlakeRef::to_string() const { std::string string; - if (auto refData = std::get_if(&data)) - string = "flake:" + refData->id; + if (auto refData = std::get_if(&data)) + string = "flake:" + refData->alias; else if (auto refData = std::get_if(&data)) { assert(!ref || !rev); @@ -132,9 +132,8 @@ std::string FlakeRef::to_string() const string = refData->uri; } - else if (auto refData = std::get_if(&data)) { + else if (auto refData = std::get_if(&data)) return refData->path; - } else abort(); diff --git a/src/libexpr/primops/flakeref.hh b/src/libexpr/primops/flakeref.hh index 9276fc737..32904953a 100644 --- a/src/libexpr/primops/flakeref.hh +++ b/src/libexpr/primops/flakeref.hh @@ -98,15 +98,17 @@ namespace nix { */ typedef std::string FlakeId; +typedef std::string FlakeAlias; +typedef std::string FlakeUri; struct FlakeRef { std::optional ref; std::optional rev; - struct IsFlakeId + struct IsAlias { - FlakeId id; + FlakeAlias alias; }; struct IsGitHub @@ -150,7 +152,7 @@ struct FlakeRef a flake ID, which requires a lookup in the flake registry. */ bool isDirect() const { - return !std::get_if(&data); + return !std::get_if(&data); } /* Check whether this is an "immutable" flake reference, that is, diff --git a/src/nix/build.cc b/src/nix/build.cc index 226c21e9e..a2fc56e69 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -82,7 +82,7 @@ struct CmdBuild : MixDryRun, InstallablesCommand // if(updateLock) // for (uint i = 0; i < installables.size(); i++) // // if (auto flakeUri = installableToFlakeUri) - // updateLockFile(*evalState, flakeUri, true); + // updateLockFile(*evalState, flakeUri); } }; diff --git a/src/nix/flake.cc b/src/nix/flake.cc index ff291aa80..df944a148 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -40,12 +40,12 @@ struct CmdFlakeList : StoreCommand, MixEvalArgs void printFlakeInfo(Flake & flake, bool json) { if (json) { nlohmann::json j; - j["name"] = flake.id; + j["id"] = flake.id; j["location"] = flake.path; j["description"] = flake.description; std::cout << j.dump(4) << std::endl; } else { - std::cout << "Name: " << flake.id << "\n"; + std::cout << "ID: " << flake.id << "\n"; std::cout << "Description: " << flake.description << "\n"; std::cout << "Location: " << flake.path << "\n"; } @@ -54,11 +54,11 @@ void printFlakeInfo(Flake & flake, bool json) { void printNonFlakeInfo(NonFlake & nonFlake, bool json) { if (json) { nlohmann::json j; - j["name"] = nonFlake.id; + j["name"] = nonFlake.alias; j["location"] = nonFlake.path; std::cout << j.dump(4) << std::endl; } else { - std::cout << "name: " << nonFlake.id << "\n"; + std::cout << "name: " << nonFlake.alias << "\n"; std::cout << "Location: " << nonFlake.path << "\n"; } } @@ -116,7 +116,7 @@ struct CmdFlakeUpdate : StoreCommand, GitRepoCommand, MixEvalArgs auto evalState = std::make_shared(searchPath, store); if (gitPath == "") gitPath = absPath("."); - updateLockFile(*evalState, gitPath, true); + 
updateLockFile(*evalState, gitPath); } }; @@ -135,15 +135,15 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON, MixEvalArgs, StoreCommand void run(nix::ref store) override { auto evalState = std::make_shared(searchPath, store); - nix::Flake flake = nix::getFlake(*evalState, FlakeRef(flakeUri), true); + nix::Flake flake = nix::getFlake(*evalState, FlakeRef(flakeUri)); printFlakeInfo(flake, json); } }; struct CmdFlakeAdd : MixEvalArgs, Command { - std::string flakeId; - std::string flakeUri; + FlakeAlias flakeAlias; + FlakeUri flakeUri; std::string name() override { @@ -157,7 +157,7 @@ struct CmdFlakeAdd : MixEvalArgs, Command CmdFlakeAdd() { - expectArg("flake-id", &flakeId); + expectArg("flake-id", &flakeAlias); expectArg("flake-uri", &flakeUri); } @@ -167,15 +167,15 @@ struct CmdFlakeAdd : MixEvalArgs, Command Path userRegistryPath = getUserRegistryPath(); auto userRegistry = readRegistry(userRegistryPath); FlakeRegistry::Entry entry(newFlakeRef); - userRegistry->entries.erase(flakeId); - userRegistry->entries.insert_or_assign(flakeId, newFlakeRef); + userRegistry->entries.erase(flakeAlias); + userRegistry->entries.insert_or_assign(flakeAlias, newFlakeRef); writeRegistry(*userRegistry, userRegistryPath); } }; struct CmdFlakeRemove : virtual Args, MixEvalArgs, Command { - std::string flakeId; + FlakeAlias flakeAlias; std::string name() override { @@ -189,21 +189,21 @@ struct CmdFlakeRemove : virtual Args, MixEvalArgs, Command CmdFlakeRemove() { - expectArg("flake-id", &flakeId); + expectArg("flake-id", &flakeAlias); } void run() override { Path userRegistryPath = getUserRegistryPath(); auto userRegistry = readRegistry(userRegistryPath); - userRegistry->entries.erase(flakeId); + userRegistry->entries.erase(flakeAlias); writeRegistry(*userRegistry, userRegistryPath); } }; struct CmdFlakePin : virtual Args, StoreCommand, MixEvalArgs { - std::string flakeId; + FlakeAlias flakeAlias; std::string name() override { @@ -217,7 +217,7 @@ struct CmdFlakePin : virtual Args, StoreCommand, MixEvalArgs CmdFlakePin() { - expectArg("flake-id", &flakeId); + expectArg("flake-id", &flakeAlias); } void run(nix::ref store) override @@ -226,14 +226,14 @@ struct CmdFlakePin : virtual Args, StoreCommand, MixEvalArgs Path userRegistryPath = getUserRegistryPath(); FlakeRegistry userRegistry = *readRegistry(userRegistryPath); - auto it = userRegistry.entries.find(flakeId); + auto it = userRegistry.entries.find(flakeAlias); if (it != userRegistry.entries.end()) { FlakeRef oldRef = it->second.ref; it->second.ref = getFlake(*evalState, oldRef, true).ref; // The 'ref' in 'flake' is immutable. 
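// Rewriting the user registry with the fetched, now rev-pinned ref is what
// "pins" the alias: later lookups resolve to exactly this revision until the
// alias is pinned again or removed.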
writeRegistry(userRegistry, userRegistryPath); } else - throw Error("the flake identifier '%s' does not exist in the user registry", flakeId); + throw Error("the flake alias '%s' does not exist in the user registry", flakeAlias); } }; From 4ad4e4866891a62a6e1bb919d81e224ba0a1cf1c Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Mon, 8 Apr 2019 19:03:00 +0200 Subject: [PATCH 046/634] FlakeRegistry = FlakeRef -> FlakeRef --- flake-registry.json | 4 +- src/libexpr/primops/flake.cc | 169 +++++++++----------------------- src/libexpr/primops/flake.hh | 10 +- src/libexpr/primops/flakeref.hh | 50 ++++++---- src/nix/build.cc | 3 +- src/nix/command.hh | 3 +- src/nix/flake.cc | 45 ++++----- src/nix/installables.cc | 1 - 8 files changed, 105 insertions(+), 180 deletions(-) diff --git a/flake-registry.json b/flake-registry.json index b850daa74..378290ec6 100644 --- a/flake-registry.json +++ b/flake-registry.json @@ -1,5 +1,4 @@ { - "version": 1, "flakes": { "dwarffs": { "uri": "github:edolstra/dwarffs/flake" @@ -7,5 +6,6 @@ "nixpkgs": { "uri": "github:edolstra/nixpkgs/flake" } - } + }, + "version": 1 } diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 729b1da95..145d79446 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -28,10 +28,8 @@ std::shared_ptr readRegistry(const Path & path) throw Error("flake registry '%s' has unsupported version %d", path, version); auto flakes = json["flakes"]; - for (auto i = flakes.begin(); i != flakes.end(); ++i) { - FlakeRegistry::Entry entry{FlakeRef(i->value("uri", ""))}; - registry->entries.emplace(i.key(), entry); - } + for (auto i = flakes.begin(); i != flakes.end(); ++i) + registry->entries.emplace(i.key(), FlakeRef(i->value("uri", ""))); return registry; } @@ -41,9 +39,8 @@ void writeRegistry(FlakeRegistry registry, Path path) { nlohmann::json json; json["version"] = 1; - for (auto elem : registry.entries) { - json["flakes"][elem.first] = { {"uri", elem.second.ref.to_string()} }; - } + for (auto elem : registry.entries) + json["flakes"][elem.first.to_string()] = { {"uri", elem.second.to_string()} }; createDirs(dirOf(path)); writeFile(path, json.dump(4)); // The '4' is the number of spaces used in the indentation in the json file. 
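// The file written here has the same shape as flake-registry.json above,
// for example:
//
//   {
//     "flakes": {
//       "nixpkgs": { "uri": "github:edolstra/nixpkgs/flake" }
//     },
//     "version": 1
//   }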
} @@ -127,106 +124,31 @@ void writeLockFile(LockFile lockFile, Path path) writeFile(path, json.dump(4)); // '4' = indentation in json file } -Path getUserRegistryPath() ->>>>>>> Fixed dependency resolution +std::shared_ptr getGlobalRegistry() { - FlakeRef flakeRef(json["uri"]); - if (!flakeRef.isImmutable()) - throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef.to_string()); - - LockFile::FlakeEntry entry(flakeRef); - - auto nonFlakeRequires = json["nonFlakeRequires"]; - - for (auto i = nonFlakeRequires.begin(); i != nonFlakeRequires.end(); ++i) { - FlakeRef flakeRef(i->value("uri", "")); - if (!flakeRef.isImmutable()) - throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef.to_string()); - entry.nonFlakeEntries.insert_or_assign(i.key(), flakeRef); - } - - auto requires = json["requires"]; - - for (auto i = requires.begin(); i != requires.end(); ++i) - entry.flakeEntries.insert_or_assign(i.key(), readFlakeEntry(*i)); - - return entry; -} - -LockFile readLockFile(const Path & path) -{ - LockFile lockFile; - - if (!pathExists(path)) - return lockFile; - - auto json = nlohmann::json::parse(readFile(path)); - - auto version = json.value("version", 0); - if (version != 1) - throw Error("lock file '%s' has unsupported version %d", path, version); - - auto nonFlakeRequires = json["nonFlakeRequires"]; - - for (auto i = nonFlakeRequires.begin(); i != nonFlakeRequires.end(); ++i) { - FlakeRef flakeRef(i->value("uri", "")); - if (!flakeRef.isImmutable()) - throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef.to_string()); - lockFile.nonFlakeEntries.insert_or_assign(i.key(), flakeRef); - } - - auto requires = json["requires"]; - - for (auto i = requires.begin(); i != requires.end(); ++i) - lockFile.flakeEntries.insert_or_assign(i.key(), readFlakeEntry(*i)); - - return lockFile; -} - -nlohmann::json flakeEntryToJson(LockFile::FlakeEntry & entry) -{ - nlohmann::json json; - json["uri"] = entry.ref.to_string(); - for (auto & x : entry.nonFlakeEntries) - json["nonFlakeRequires"][x.first]["uri"] = x.second.to_string(); - for (auto & x : entry.flakeEntries) - json["requires"][x.first] = flakeEntryToJson(x.second); - return json; -} - -void writeLockFile(LockFile lockFile, Path path) -{ - nlohmann::json json; - json["version"] = 1; - json["nonFlakeRequires"]; - for (auto & x : lockFile.nonFlakeEntries) - json["nonFlakeRequires"][x.first]["uri"] = x.second.to_string(); - for (auto & x : lockFile.flakeEntries) - json["requires"][x.first] = flakeEntryToJson(x.second); - createDirs(dirOf(path)); - writeFile(path, json.dump(4)); // '4' = indentation in json file + return std::make_shared(); } Path getUserRegistryPath() { return getHome() + "/.config/nix/registry.json"; } -std::shared_ptr getGlobalRegistry() -{ - // FIXME: get from nixos.org. - Path registryFile = settings.nixDataDir + "/nix/flake-registry.json"; - return readRegistry(registryFile); -} std::shared_ptr getUserRegistry() { return readRegistry(getUserRegistryPath()); } +std::shared_ptr getLocalRegistry() +{ + Path registryFile = settings.nixDataDir + "/nix/flake-registry.json"; + return readRegistry(registryFile); +} + std::shared_ptr getFlagRegistry() { + // TODO (Nick): Implement this. return std::make_shared(); - // TODO: Implement this once the right flags are implemented. 
} const std::vector> EvalState::getFlakeRegistries() @@ -259,9 +181,9 @@ Value * makeFlakeRegistryValue(EvalState & state) for (auto & registry : registries) { for (auto & entry : registry->entries) { - auto vEntry = state.allocAttr(*v, entry.first); + auto vEntry = state.allocAttr(*v, entry.first.to_string()); state.mkAttrs(*vEntry, 2); - mkString(*state.allocAttr(*vEntry, state.symbols.create("uri")), entry.second.ref.to_string()); + mkString(*state.allocAttr(*vEntry, state.symbols.create("uri")), entry.second.to_string()); vEntry->attrs->sort(); } } @@ -272,23 +194,30 @@ Value * makeFlakeRegistryValue(EvalState & state) } static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, - std::vector> registries) + std::vector> registries, std::vector pastSearches = {}) { - if (auto refData = std::get_if(&flakeRef.data)) { - for (auto registry : registries) { - auto i = registry->entries.find(refData->alias); - if (i != registry->entries.end()) { - auto newRef = FlakeRef(i->second.ref); - if (!newRef.isDirect()) - throw Error("found indirect flake URI '%s' in the flake registry", i->second.ref.to_string()); - if (flakeRef.ref) newRef.setRef(*flakeRef.ref); - if (flakeRef.rev) newRef.setRev(*flakeRef.rev); - return newRef; + for (std::shared_ptr registry : registries) { + auto i = registry->entries.find(flakeRef); + if (i != registry->entries.end()) { + auto newRef = i->second; + if (std::get_if(&flakeRef.data)) { + if (flakeRef.ref) newRef.ref = flakeRef.ref; + if (flakeRef.rev) newRef.rev = flakeRef.rev; } + std::string errorMsg = "found cycle in flake registries: "; + for (FlakeRef oldRef : pastSearches) { + errorMsg += oldRef.to_string(); + if (oldRef == newRef) + throw Error(errorMsg); + errorMsg += " - "; + } + pastSearches.push_back(newRef); + return lookupFlake(state, newRef, registries, pastSearches); } - throw Error("cannot find flake with alias '%s' in the flake registry or in the flake lock file", refData->alias); - } else - return flakeRef; + } + if (!flakeRef.isDirect()) + throw Error("indirect flake URI '%s' is the result of a lookup", flakeRef.to_string()); + return flakeRef; } struct FlakeSourceInfo @@ -302,6 +231,7 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef flakeRef, bo { FlakeRef fRef = lookupFlake(state, flakeRef, state.getFlakeRegistries()); + // This only downloads only one revision of the repo, not the entire history. if (auto refData = std::get_if(&fRef.data)) { if (evalSettings.pureEval && !impureIsAllowed && !fRef.isImmutable()) throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", fRef.to_string()); @@ -332,6 +262,7 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef flakeRef, bo return info; } + // This downloads the entire git history else if (auto refData = std::get_if(&fRef.data)) { auto gitInfo = exportGit(state.store, refData->uri, fRef.ref, fRef.rev ? fRef.rev->to_string(Base16, false) : "", "source"); @@ -342,7 +273,7 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef flakeRef, bo return info; } - else if (auto refData = std::get_if(&directFlakeRef.data)) { + else if (auto refData = std::get_if(&fRef.data)) { if (!pathExists(refData->path + "/.git")) throw Error("flake '%s' does not reference a Git repository", refData->path); auto gitInfo = exportGit(state.store, refData->path, {}, "", "source"); @@ -452,7 +383,7 @@ NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeAlias al dependencies. FIXME: this should return a graph of flakes. 
*/ -Dependencies resolveFlake(EvalState & state, const FlakeRef & topRef, bool impureTopRef, bool isTopFlake = true) +Dependencies resolveFlake(EvalState & state, const FlakeRef & topRef, bool impureTopRef, bool isTopFlake) { Flake flake = getFlake(state, topRef, isTopFlake && impureTopRef); Dependencies deps(flake); @@ -461,7 +392,7 @@ Dependencies resolveFlake(EvalState & state, const FlakeRef & topRef, bool impur deps.nonFlakeDeps.push_back(getNonFlake(state, nonFlakeInfo.second, nonFlakeInfo.first)); for (auto & newFlakeRef : flake.requires) - deps.flakeDeps.push_back(resolveFlake(state, newFlakeRef, impureTopRef, false)); + deps.flakeDeps.push_back(resolveFlake(state, newFlakeRef, false)); return deps; } @@ -505,25 +436,23 @@ void updateLockFile(EvalState & state, Path path) // Return the `provides` of the top flake, while assigning to `v` the provides // of the dependencies as well. -Value * makeFlakeValue(EvalState & state, FlakeUri flakeUri, Value & v) +Value * makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, bool impureTopRef, Value & v) { - FlakeRef flakeRef = FlakeRef(flakeUri); + Dependencies deps = resolveFlake(state, flakeRef, impureTopRef); - Dependencies deps = resolveFlake(state, flakeRef, impure); - - // // FIXME: we should call each flake with only its dependencies - // // (rather than the closure of the top-level flake). + // FIXME: we should call each flake with only its dependencies + // (rather than the closure of the top-level flake). auto vResult = state.allocValue(); // This will store the attribute set of the `nonFlakeRequires` and the `requires.provides`. state.mkAttrs(*vResult, deps.flakeDeps.size()); - Value * vTop = 0; + Value * vTop = state.allocAttr(*vResult, deps.flake.id); - for (auto & flake : deps.flakeDeps) { + for (auto & dep : deps.flakeDeps) { + Flake flake = dep.flake; auto vFlake = state.allocAttr(*vResult, flake.id); - if (deps.topFlakeId == flake.id) vTop = vFlake; state.mkAttrs(*vFlake, 4); @@ -532,7 +461,7 @@ Value * makeFlakeValue(EvalState & state, FlakeUri flakeUri, Value & v) state.store->assertStorePath(flake.path); mkString(*state.allocAttr(*vFlake, state.sOutPath), flake.path, {flake.path}); - if (flake.second.revCount) + if (flake.revCount) mkInt(*state.allocAttr(*vFlake, state.symbols.create("revCount")), *flake.revCount); auto vProvides = state.allocAttr(*vFlake, state.symbols.create("provides")); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index adf8b07af..9da065234 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -10,13 +10,7 @@ class EvalState; struct FlakeRegistry { - struct Entry - { - FlakeRef ref; - Entry(const FlakeRef & flakeRef) : ref(flakeRef) {}; - Entry operator=(const Entry & entry) { return Entry(entry.ref); } - }; - std::map entries; + std::map entries; }; struct LockFile @@ -79,7 +73,7 @@ struct Dependencies Dependencies(const Flake & flake) : flake(flake) {} }; -Dependencies resolveFlake(EvalState &, const FlakeRef &, bool impureTopRef, bool isTopFlake); +Dependencies resolveFlake(EvalState &, const FlakeRef &, bool impureTopRef, bool isTopFlake = true); FlakeRegistry updateLockFile(EvalState &, Flake &); diff --git a/src/libexpr/primops/flakeref.hh b/src/libexpr/primops/flakeref.hh index 32904953a..d789a6f70 100644 --- a/src/libexpr/primops/flakeref.hh +++ b/src/libexpr/primops/flakeref.hh @@ -103,48 +103,60 @@ typedef std::string FlakeUri; struct FlakeRef { - std::optional ref; - std::optional rev; - struct IsAlias { FlakeAlias alias; + bool 
operator<(const IsAlias & b) const { return alias < b.alias; }; + bool operator==(const IsAlias & b) const { return alias == b.alias; }; }; - struct IsGitHub - { + struct IsGitHub { std::string owner, repo; + bool operator<(const IsGitHub & b) const { + return std::make_tuple(owner, repo) < std::make_tuple(b.owner, b.repo); + } + bool operator==(const IsGitHub & b) const { + return owner == b.owner && repo == b.repo; + } }; // Git, Tarball struct IsGit { std::string uri; + bool operator<(const IsGit & b) const { return uri < b.uri; } + bool operator==(const IsGit & b) const { return uri == b.uri; } }; struct IsPath { Path path; + bool operator<(const IsPath & b) const { return path < b.path; } + bool operator==(const IsPath & b) const { return path == b.path; } }; // Git, Tarball - std::variant data; + std::variant data; + + std::optional ref; + std::optional rev; + + bool operator<(const FlakeRef & flakeRef) const + { + return std::make_tuple(this->data, ref, rev) < + std::make_tuple(flakeRef.data, flakeRef.ref, flakeRef.rev); + } + + bool operator==(const FlakeRef & flakeRef) const + { + return std::make_tuple(this->data, ref, rev) == + std::make_tuple(flakeRef.data, flakeRef.ref, flakeRef.rev); + } // Parse a flake URI. FlakeRef(const std::string & uri, bool allowRelative = false); - // Default constructor - FlakeRef(const FlakeRef & flakeRef) : data(flakeRef.data) {}; - - /* Unify two flake references so that the resulting reference - combines the information from both. For example, - "nixpkgs/" and "github:NixOS/nixpkgs" unifies to - "nixpkgs/master". May throw an exception if the references are - incompatible (e.g. "nixpkgs/" and "nixpkgs/", - where hash1 != hash2). */ - FlakeRef(const FlakeRef & a, const FlakeRef & b); - // FIXME: change to operator <<. 
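// The comparison operators above are what allow a FlakeRef to serve as a
// map key; after this patch the registry itself is such a map
// (FlakeRegistry maps FlakeRef to FlakeRef). A minimal sketch:
//
//   std::map<FlakeRef, FlakeRef> entries;
//   entries.emplace(FlakeRef("nixpkgs"), FlakeRef("github:edolstra/nixpkgs/flake"));
//   auto i = entries.find(FlakeRef("nixpkgs"));  // resolves the alias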
std::string to_string() const; @@ -160,9 +172,5 @@ struct FlakeRef bool isImmutable() const; FlakeRef baseRef() const; - - void setRef(std::optional ref) { ref = ref; } - - void setRev(std::optional rev) { rev = rev; } }; } diff --git a/src/nix/build.cc b/src/nix/build.cc index a2fc56e69..5a3d9d31a 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -1,4 +1,3 @@ -#include "primops/flake.hh" #include "eval.hh" #include "command.hh" #include "common-args.hh" @@ -78,7 +77,7 @@ struct CmdBuild : MixDryRun, InstallablesCommand } } - // std::string flakeUri = ""; + // FlakeUri flakeUri = ""; // if(updateLock) // for (uint i = 0; i < installables.size(); i++) // // if (auto flakeUri = installableToFlakeUri) diff --git a/src/nix/command.hh b/src/nix/command.hh index 83959bf9a..56e1e6f34 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -1,6 +1,7 @@ #pragma once #include "args.hh" +#include "primops/flake.hh" #include "common-eval-args.hh" namespace nix { @@ -46,7 +47,7 @@ struct GitRepoCommand : virtual Args struct FlakeCommand : virtual Args { - std::string flakeUri; + FlakeUri flakeUri; FlakeCommand() { diff --git a/src/nix/flake.cc b/src/nix/flake.cc index df944a148..dbf0d3e9a 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -1,4 +1,3 @@ -#include "primops/flake.hh" #include "command.hh" #include "common-args.hh" #include "shared.hh" @@ -29,11 +28,9 @@ struct CmdFlakeList : StoreCommand, MixEvalArgs stopProgressBar(); - for (auto & registry : registries) { - for (auto & entry : registry->entries) { - std::cout << entry.first << " " << entry.second.ref.to_string() << "\n"; - } - } + for (auto & registry : registries) + for (auto & entry : registry->entries) + std::cout << entry.first.to_string() << " " << entry.second.to_string() << "\n"; } }; @@ -81,7 +78,7 @@ struct CmdFlakeDeps : FlakeCommand, MixJSON, StoreCommand, MixEvalArgs FlakeRef flakeRef(flakeUri); - Dependencies deps = resolveFlake(*evalState, flakeRef, true, true); + Dependencies deps = resolveFlake(*evalState, flakeRef, true); std::queue todo; todo.push(deps); @@ -135,15 +132,15 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON, MixEvalArgs, StoreCommand void run(nix::ref store) override { auto evalState = std::make_shared(searchPath, store); - nix::Flake flake = nix::getFlake(*evalState, FlakeRef(flakeUri)); + nix::Flake flake = nix::getFlake(*evalState, FlakeRef(flakeUri), true); printFlakeInfo(flake, json); } }; struct CmdFlakeAdd : MixEvalArgs, Command { - FlakeAlias flakeAlias; - FlakeUri flakeUri; + FlakeUri alias; + FlakeUri uri; std::string name() override { @@ -157,25 +154,24 @@ struct CmdFlakeAdd : MixEvalArgs, Command CmdFlakeAdd() { - expectArg("flake-id", &flakeAlias); - expectArg("flake-uri", &flakeUri); + expectArg("alias", &alias); + expectArg("flake-uri", &uri); } void run() override { - FlakeRef newFlakeRef(flakeUri); + FlakeRef aliasRef(alias); Path userRegistryPath = getUserRegistryPath(); auto userRegistry = readRegistry(userRegistryPath); - FlakeRegistry::Entry entry(newFlakeRef); - userRegistry->entries.erase(flakeAlias); - userRegistry->entries.insert_or_assign(flakeAlias, newFlakeRef); + userRegistry->entries.erase(aliasRef); + userRegistry->entries.insert_or_assign(aliasRef, FlakeRef(uri)); writeRegistry(*userRegistry, userRegistryPath); } }; struct CmdFlakeRemove : virtual Args, MixEvalArgs, Command { - FlakeAlias flakeAlias; + FlakeUri alias; std::string name() override { @@ -189,21 +185,21 @@ struct CmdFlakeRemove : virtual Args, MixEvalArgs, Command CmdFlakeRemove() { - 
expectArg("flake-id", &flakeAlias); + expectArg("alias", &alias); } void run() override { Path userRegistryPath = getUserRegistryPath(); auto userRegistry = readRegistry(userRegistryPath); - userRegistry->entries.erase(flakeAlias); + userRegistry->entries.erase(FlakeRef(alias)); writeRegistry(*userRegistry, userRegistryPath); } }; struct CmdFlakePin : virtual Args, StoreCommand, MixEvalArgs { - FlakeAlias flakeAlias; + FlakeUri alias; std::string name() override { @@ -217,7 +213,7 @@ struct CmdFlakePin : virtual Args, StoreCommand, MixEvalArgs CmdFlakePin() { - expectArg("flake-id", &flakeAlias); + expectArg("alias", &alias); } void run(nix::ref store) override @@ -226,14 +222,13 @@ struct CmdFlakePin : virtual Args, StoreCommand, MixEvalArgs Path userRegistryPath = getUserRegistryPath(); FlakeRegistry userRegistry = *readRegistry(userRegistryPath); - auto it = userRegistry.entries.find(flakeAlias); + auto it = userRegistry.entries.find(FlakeRef(alias)); if (it != userRegistry.entries.end()) { - FlakeRef oldRef = it->second.ref; - it->second.ref = getFlake(*evalState, oldRef, true).ref; + it->second = getFlake(*evalState, it->second, true).ref; // The 'ref' in 'flake' is immutable. writeRegistry(userRegistry, userRegistryPath); } else - throw Error("the flake alias '%s' does not exist in the user registry", flakeAlias); + throw Error("the flake alias '%s' does not exist in the user registry", alias); } }; diff --git a/src/nix/installables.cc b/src/nix/installables.cc index e792ce96d..13a68a797 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -7,7 +7,6 @@ #include "get-drvs.hh" #include "store-api.hh" #include "shared.hh" -#include "primops/flake.hh" #include From 4bf3a8226badcdc70c013dfcfa266ee72f6cb89b Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Thu, 21 Mar 2019 09:30:16 +0100 Subject: [PATCH 047/634] Automated lockfile updating with `nix build` --- src/nix/build.cc | 16 ++++++++++------ src/nix/command.hh | 6 ++++++ src/nix/installables.cc | 8 ++++++++ 3 files changed, 24 insertions(+), 6 deletions(-) diff --git a/src/nix/build.cc b/src/nix/build.cc index da7c7f614..608946378 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -11,7 +11,7 @@ struct CmdBuild : MixDryRun, InstallablesCommand { Path outLink = "result"; - std::optional gitRepo = std::nullopt; + bool update = true; CmdBuild() { @@ -28,9 +28,9 @@ struct CmdBuild : MixDryRun, InstallablesCommand .set(&outLink, Path("")); mkFlag() - .longName("update-lock-file") - .description("update the lock file") - .dest(&gitRepo); + .longName("no-update") + .description("don't update the lock files") + .set(&update, false); } std::string name() override @@ -78,8 +78,12 @@ struct CmdBuild : MixDryRun, InstallablesCommand } } - if (gitRepo) - updateLockFile(*evalState, *gitRepo); + if (update) + for (auto installable : installables) { + auto flakeUri = installable->installableToFlakeUri(); + if (flakeUri) + updateLockFile(*evalState, *flakeUri); + } } }; diff --git a/src/nix/command.hh b/src/nix/command.hh index 83959bf9a..5d0c0c82c 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -2,6 +2,7 @@ #include "args.hh" #include "common-eval-args.hh" +#include namespace nix { @@ -65,6 +66,11 @@ struct Installable Buildable toBuildable(); + virtual std::optional installableToFlakeUri() + { + return std::nullopt; + } + virtual Value * toValue(EvalState & state) { throw Error("argument '%s' cannot be evaluated", what()); diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 
e792ce96d..43e15849b 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -176,6 +176,14 @@ struct InstallableFlake : InstallableValue state.forceValue(*v); return v; } + + std::optional installableToFlakeUri() override + { + if (std::get_if(&flakeRef.data)) + return flakeRef.to_string(); + else + return std::nullopt; + } }; // FIXME: extend From 84c12dbd7c8f2b34c46908f4a0c43cbb86023f20 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 15 Apr 2019 13:45:51 +0200 Subject: [PATCH 048/634] Move --impure to MixEvalArgs --- src/libexpr/common-eval-args.cc | 7 +++++++ src/nix/installables.cc | 7 ------- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/libexpr/common-eval-args.cc b/src/libexpr/common-eval-args.cc index 37c74a94b..3e5400052 100644 --- a/src/libexpr/common-eval-args.cc +++ b/src/libexpr/common-eval-args.cc @@ -26,6 +26,13 @@ MixEvalArgs::MixEvalArgs() .description("add a path to the list of locations used to look up <...> file names") .label("path") .handler([&](std::string s) { searchPath.push_back(s); }); + + mkFlag() + .longName("impure") + .description("allow access to mutable paths and repositories") + .handler([&](std::vector ss) { + evalSettings.pureEval = false; + }); } Bindings * MixEvalArgs::getAutoArgs(EvalState & state) diff --git a/src/nix/installables.cc b/src/nix/installables.cc index fc2c34861..9bc5ff41f 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -20,13 +20,6 @@ SourceExprCommand::SourceExprCommand() .label("file") .description("evaluate a set of attributes from FILE (deprecated)") .dest(&file); - - mkFlag() - .longName("impure") - .description("allow access to mutable paths and repositories") - .handler([&](std::vector ss) { - evalSettings.pureEval = false; - }); } ref SourceExprCommand::getEvalState() From f6d684b5e21966518d019c0225c3b0e8da2b6aff Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 15 Apr 2019 13:48:56 +0200 Subject: [PATCH 049/634] getFlakeRegistries(): Return registries regardless of pureEval This makes e.g. 'nix flake list' work. --- src/libexpr/primops/flake.cc | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 145d79446..e1eeffca6 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -126,7 +126,8 @@ void writeLockFile(LockFile lockFile, Path path) std::shared_ptr getGlobalRegistry() { - return std::make_shared(); + Path registryFile = settings.nixDataDir + "/nix/flake-registry.json"; + return readRegistry(registryFile); } Path getUserRegistryPath() @@ -139,12 +140,6 @@ std::shared_ptr getUserRegistry() return readRegistry(getUserRegistryPath()); } -std::shared_ptr getLocalRegistry() -{ - Path registryFile = settings.nixDataDir + "/nix/flake-registry.json"; - return readRegistry(registryFile); -} - std::shared_ptr getFlagRegistry() { // TODO (Nick): Implement this. 
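// (The "flag" registry is intended to be filled in from command-line
// arguments at some point; until then it is just an empty registry, so in
// practice only the global and user registries affect lookups.)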
@@ -154,15 +149,8 @@ std::shared_ptr getFlagRegistry() const std::vector> EvalState::getFlakeRegistries() { std::vector> registries; - if (evalSettings.pureEval) { - registries.push_back(std::make_shared()); // global - registries.push_back(std::make_shared()); // user - registries.push_back(std::make_shared()); // local - } else { - registries.push_back(getGlobalRegistry()); - registries.push_back(getUserRegistry()); - registries.push_back(getLocalRegistry()); - } + registries.push_back(getGlobalRegistry()); + registries.push_back(getUserRegistry()); registries.push_back(getFlagRegistry()); return registries; } From c179f668e5bf64499169b17515bdc4c40473fca9 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 15 Apr 2019 14:08:18 +0200 Subject: [PATCH 050/634] Slight cleanup --- src/libexpr/primops/flake.cc | 7 +++---- src/libexpr/primops/flake.hh | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index e1eeffca6..3c1cffba3 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -12,8 +12,7 @@ namespace nix { -/* Read the registry or a lock file. (Currently they have an identical - format. */ +/* Read a registry. */ std::shared_ptr readRegistry(const Path & path) { auto registry = std::make_shared(); @@ -34,8 +33,8 @@ std::shared_ptr readRegistry(const Path & path) return registry; } -/* Write the registry or lock file to a file. */ -void writeRegistry(FlakeRegistry registry, Path path) +/* Write a registry to a file. */ +void writeRegistry(const FlakeRegistry & registry, Path path) { nlohmann::json json; json["version"] = 1; diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 9da065234..8bfceb756 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -35,7 +35,7 @@ Value * makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, bool impure std::shared_ptr readRegistry(const Path &); -void writeRegistry(FlakeRegistry, Path); +void writeRegistry(const FlakeRegistry &, Path); struct Flake { From b4e367bf4a28b5495bc349df6fff0694ae73b9c2 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 15 Apr 2019 14:13:10 +0200 Subject: [PATCH 051/634] FlakeRef::to_string(): Drop the "flake:" prefix This is unnecessary in most contexts and makes 'nix flake list' output less readable. 
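
For example (an illustrative sketch, assuming a FlakeRef constructed from a
bare registry alias as elsewhere in this series):

  FlakeRef("nixpkgs").to_string()   // now "nixpkgs" rather than "flake:nixpkgs"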
--- src/libexpr/primops/flakeref.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index ab1e5e152..274552218 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -120,7 +120,7 @@ std::string FlakeRef::to_string() const { std::string string; if (auto refData = std::get_if(&data)) - string = "flake:" + refData->alias; + string = refData->alias; else if (auto refData = std::get_if(&data)) { assert(!ref || !rev); From b3d33b02e3fc40c7bd8f602334287825e7e6333d Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Wed, 10 Apr 2019 12:12:44 +0200 Subject: [PATCH 052/634] Added support for private github repositories --- src/libexpr/primops/flake.cc | 6 ++++-- src/libstore/globals.hh | 3 +++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 3c1cffba3..1b0b1eba7 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -226,13 +226,15 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef flakeRef, bo // FIXME: use regular /archive URLs instead? api.github.com // might have stricter rate limits. - // FIXME: support passing auth tokens for private repos. - auto url = fmt("https://api.github.com/repos/%s/%s/tarball/%s", refData->owner, refData->repo, fRef.rev ? fRef.rev->to_string(Base16, false) : fRef.ref ? *fRef.ref : "master"); + std::string accessToken = settings.githubAccessToken.get(); + if (accessToken != "") + url += "?access_token=" + accessToken; + auto result = getDownloader()->downloadCached(state.store, url, true, "source", Hash(), nullptr, fRef.rev ? 1000000000 : settings.tarballTtl); diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index 53efc6a90..80d70fba3 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -344,6 +344,9 @@ public: Setting pluginFiles{this, {}, "plugin-files", "Plugins to dynamically load at nix initialization time."}; + + Setting githubAccessToken{this, "", "github-acces-token", + "GitHub access token to get access to GitHub data through the GitHub API for github:<..> flakes."}; }; From 7587d62d02f216f28034f9e0938eb3236494c41b Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Tue, 16 Apr 2019 08:21:52 +0200 Subject: [PATCH 053/634] Fixed flake pin issues --- src/libexpr/primops/flake.hh | 2 ++ src/nix/flake.cc | 17 +++++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 8bfceb756..80114c7c8 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -63,6 +63,8 @@ struct NonFlake NonFlake(const FlakeRef flakeRef) : ref(flakeRef) {}; }; +std::shared_ptr getGlobalRegistry(); + Flake getFlake(EvalState &, const FlakeRef &, bool impureIsAllowed); struct Dependencies diff --git a/src/nix/flake.cc b/src/nix/flake.cc index dbf0d3e9a..8634733d6 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -129,6 +129,8 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON, MixEvalArgs, StoreCommand return "list info about a given flake"; } + CmdFlakeInfo () { evalSettings.pureEval = false; } + void run(nix::ref store) override { auto evalState = std::make_shared(searchPath, store); @@ -156,6 +158,7 @@ struct CmdFlakeAdd : MixEvalArgs, Command { expectArg("alias", &alias); expectArg("flake-uri", &uri); + evalSettings.pureEval = false; } void run() override @@ -186,6 +189,7 @@ struct CmdFlakeRemove : virtual 
Args, MixEvalArgs, Command CmdFlakeRemove() { expectArg("alias", &alias); + evalSettings.pureEval = false; } void run() override @@ -214,6 +218,7 @@ struct CmdFlakePin : virtual Args, StoreCommand, MixEvalArgs CmdFlakePin() { expectArg("alias", &alias); + evalSettings.pureEval = false; } void run(nix::ref store) override @@ -227,8 +232,16 @@ struct CmdFlakePin : virtual Args, StoreCommand, MixEvalArgs it->second = getFlake(*evalState, it->second, true).ref; // The 'ref' in 'flake' is immutable. writeRegistry(userRegistry, userRegistryPath); - } else - throw Error("the flake alias '%s' does not exist in the user registry", alias); + } else { + std::shared_ptr globalReg = getGlobalRegistry(); + it = globalReg->entries.find(FlakeRef(alias)); + if (it != globalReg->entries.end()) { + FlakeRef newRef = getFlake(*evalState, it->second, true).ref; + userRegistry.entries.insert_or_assign(alias, newRef); + writeRegistry(userRegistry, userRegistryPath); + } else + throw Error("the flake alias '%s' does not exist in the user or global registry", alias); + } } }; From d8fa2fc429c2dbaffce585e08d3070f912a29bf6 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 16 Apr 2019 12:26:17 +0200 Subject: [PATCH 054/634] Add FIXME for pureEval --- src/nix/flake.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 8634733d6..3b37ad7a0 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -129,7 +129,7 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON, MixEvalArgs, StoreCommand return "list info about a given flake"; } - CmdFlakeInfo () { evalSettings.pureEval = false; } + CmdFlakeInfo () { evalSettings.pureEval = false; /* FIXME */ } void run(nix::ref store) override { @@ -158,7 +158,7 @@ struct CmdFlakeAdd : MixEvalArgs, Command { expectArg("alias", &alias); expectArg("flake-uri", &uri); - evalSettings.pureEval = false; + evalSettings.pureEval = false; // FIXME } void run() override @@ -189,7 +189,7 @@ struct CmdFlakeRemove : virtual Args, MixEvalArgs, Command CmdFlakeRemove() { expectArg("alias", &alias); - evalSettings.pureEval = false; + evalSettings.pureEval = false; // FIXME } void run() override @@ -218,7 +218,7 @@ struct CmdFlakePin : virtual Args, StoreCommand, MixEvalArgs CmdFlakePin() { expectArg("alias", &alias); - evalSettings.pureEval = false; + evalSettings.pureEval = false; // FIXME } void run(nix::ref store) override From 529acfd24fdfb5e22eb3ec55b14e855ef845c98b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 16 Apr 2019 12:36:10 +0200 Subject: [PATCH 055/634] Add nix to the flake registry --- flake-registry.json | 3 +++ 1 file changed, 3 insertions(+) diff --git a/flake-registry.json b/flake-registry.json index 378290ec6..422f77675 100644 --- a/flake-registry.json +++ b/flake-registry.json @@ -3,6 +3,9 @@ "dwarffs": { "uri": "github:edolstra/dwarffs/flake" }, + "nix": { + "uri": "github:tweag/nix/flakes" + }, "nixpkgs": { "uri": "github:edolstra/nixpkgs/flake" } From 035ac443544b46dc87274ed1eb1393b07db0912c Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 16 Apr 2019 13:56:08 +0200 Subject: [PATCH 056/634] Fix makeFlakeValue() --- src/libexpr/primops/flake.cc | 68 +++++++++++++++++------------------- src/libexpr/primops/flake.hh | 2 +- src/nix/installables.cc | 6 ++-- 3 files changed, 37 insertions(+), 39 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 1b0b1eba7..bd8e5960c 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -304,6 +304,7 @@ 
Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe state.forceAttrs(vInfo); + // FIXME: change to "id"? if (auto name = vInfo.attrs->get(state.sName)) flake.id = state.forceStringNoCtx(*(**name).value, *(**name).pos); else @@ -423,48 +424,45 @@ void updateLockFile(EvalState & state, Path path) } } -// Return the `provides` of the top flake, while assigning to `v` the provides -// of the dependencies as well. -Value * makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, bool impureTopRef, Value & v) +void callFlake(EvalState & state, const Dependencies & flake, Value & v) { - Dependencies deps = resolveFlake(state, flakeRef, impureTopRef); + // Construct the resulting attrset '{description, provides, + // ...}'. This attrset is passed lazily as an argument to 'provides'. - // FIXME: we should call each flake with only its dependencies - // (rather than the closure of the top-level flake). + state.mkAttrs(v, flake.flakeDeps.size() + flake.nonFlakeDeps.size() + 4); - auto vResult = state.allocValue(); - // This will store the attribute set of the `nonFlakeRequires` and the `requires.provides`. - - state.mkAttrs(*vResult, deps.flakeDeps.size()); - - Value * vTop = state.allocAttr(*vResult, deps.flake.id); - - for (auto & dep : deps.flakeDeps) { - Flake flake = dep.flake; - auto vFlake = state.allocAttr(*vResult, flake.id); - - state.mkAttrs(*vFlake, 4); - - mkString(*state.allocAttr(*vFlake, state.sDescription), flake.description); - - state.store->assertStorePath(flake.path); - mkString(*state.allocAttr(*vFlake, state.sOutPath), flake.path, {flake.path}); - - if (flake.revCount) - mkInt(*state.allocAttr(*vFlake, state.symbols.create("revCount")), *flake.revCount); - - auto vProvides = state.allocAttr(*vFlake, state.symbols.create("provides")); - mkApp(*vProvides, *flake.vProvides, *vResult); - - vFlake->attrs->sort(); + for (auto & dep : flake.flakeDeps) { + auto vFlake = state.allocAttr(v, dep.flake.id); + callFlake(state, dep, *vFlake); } - vResult->attrs->sort(); + for (auto & dep : flake.nonFlakeDeps) { + auto vNonFlake = state.allocAttr(v, dep.alias); + state.mkAttrs(*vNonFlake, 4); - v = *vResult; + state.store->isValidPath(dep.path); + mkString(*state.allocAttr(*vNonFlake, state.sOutPath), dep.path, {dep.path}); + } - assert(vTop); - return vTop; + mkString(*state.allocAttr(v, state.sDescription), flake.flake.description); + + state.store->isValidPath(flake.flake.path); + mkString(*state.allocAttr(v, state.sOutPath), flake.flake.path, {flake.flake.path}); + + if (flake.flake.revCount) + mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *flake.flake.revCount); + + auto vProvides = state.allocAttr(v, state.symbols.create("provides")); + mkApp(*vProvides, *flake.flake.vProvides, v); + + v.attrs->sort(); +} + +// Return the `provides` of the top flake, while assigning to `v` the provides +// of the dependencies as well. +void makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, bool impureTopRef, Value & v) +{ + callFlake(state, resolveFlake(state, flakeRef, impureTopRef), v); } // This function is exposed to be used in nix files. 
diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 80114c7c8..4cd41352d 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -31,7 +31,7 @@ Path getUserRegistryPath(); Value * makeFlakeRegistryValue(EvalState & state); -Value * makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, bool impureTopRef, Value & v); +void makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, bool impureTopRef, Value & v); std::shared_ptr readRegistry(const Path &); diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 9bc5ff41f..37217397a 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -146,10 +146,10 @@ struct InstallableFlake : InstallableValue Value * toValue(EvalState & state) override { - auto vTemp = state.allocValue(); - auto vFlake = *makeFlakeValue(state, flakeRef, true, *vTemp); + auto vFlake = state.allocValue(); + makeFlakeValue(state, flakeRef, true, *vFlake); - auto vProvides = (*vFlake.attrs->get(state.symbols.create("provides")))->value; + auto vProvides = (*vFlake->attrs->get(state.symbols.create("provides")))->value; state.forceValue(*vProvides); From aecf07b1d6c21b8f402545912ae6c053d0f12a11 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 16 Apr 2019 14:08:14 +0200 Subject: [PATCH 057/634] Remove dead function --- src/libexpr/primops/flake.cc | 26 -------------------------- src/libexpr/primops/flake.hh | 2 -- 2 files changed, 28 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index bd8e5960c..ba303fe4a 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -154,32 +154,6 @@ const std::vector> EvalState::getFlakeRegistries( return registries; } -// Creates a Nix attribute set value listing all dependencies, so they can be used in `provides`. 
-Value * makeFlakeRegistryValue(EvalState & state) -{ - auto v = state.allocValue(); - - auto registries = state.getFlakeRegistries(); - - int size = 0; - for (auto registry : registries) - size += registry->entries.size(); - state.mkAttrs(*v, size); - - for (auto & registry : registries) { - for (auto & entry : registry->entries) { - auto vEntry = state.allocAttr(*v, entry.first.to_string()); - state.mkAttrs(*vEntry, 2); - mkString(*state.allocAttr(*vEntry, state.symbols.create("uri")), entry.second.to_string()); - vEntry->attrs->sort(); - } - } - - v->attrs->sort(); - - return v; -} - static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, std::vector> registries, std::vector pastSearches = {}) { diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 4cd41352d..73446c908 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -29,8 +29,6 @@ struct LockFile Path getUserRegistryPath(); -Value * makeFlakeRegistryValue(EvalState & state); - void makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, bool impureTopRef, Value & v); std::shared_ptr readRegistry(const Path &); From ba66455636f40264d44c7e1fb87e13653b22042a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 16 Apr 2019 14:10:05 +0200 Subject: [PATCH 058/634] Improve incremental build --- src/nix/build.cc | 1 + src/nix/command.hh | 3 +-- src/nix/flake.cc | 2 ++ src/nix/installables.cc | 1 + 4 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/nix/build.cc b/src/nix/build.cc index ef6b48969..9ef07dcdb 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -3,6 +3,7 @@ #include "common-args.hh" #include "shared.hh" #include "store-api.hh" +#include "primops/flake.hh" using namespace nix; diff --git a/src/nix/command.hh b/src/nix/command.hh index a5ae56fb9..5d0c0c82c 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -1,7 +1,6 @@ #pragma once #include "args.hh" -#include "primops/flake.hh" #include "common-eval-args.hh" #include @@ -48,7 +47,7 @@ struct GitRepoCommand : virtual Args struct FlakeCommand : virtual Args { - FlakeUri flakeUri; + std::string flakeUri; FlakeCommand() { diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 3b37ad7a0..2079b1c27 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -3,6 +3,8 @@ #include "shared.hh" #include "progress-bar.hh" #include "eval.hh" +#include "primops/flake.hh" + #include #include diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 37217397a..963321336 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -7,6 +7,7 @@ #include "get-drvs.hh" #include "store-api.hh" #include "shared.hh" +#include "primops/flake.hh" #include From ed9d725392827ee1516ca90ca891b2e7a66b2859 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 16 Apr 2019 14:16:20 +0200 Subject: [PATCH 059/634] getFlake(): Use impureIsAllowed This fixes 'nix build nixpkgs:hello' without --impure. --- src/libexpr/primops/flake.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index ba303fe4a..32874f87b 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -253,7 +253,7 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef flakeRef, bo // This will return the flake which corresponds to a given FlakeRef. The lookupFlake is done within this function. 
Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowed = false) { - FlakeSourceInfo sourceInfo = fetchFlake(state, flakeRef); + FlakeSourceInfo sourceInfo = fetchFlake(state, flakeRef, impureIsAllowed); debug("got flake source '%s' with revision %s", sourceInfo.storePath, sourceInfo.rev.value_or(Hash(htSHA1)).to_string(Base16, false)); From e1d73edb10ca38184c85b3124b4c59c6f04a0851 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 16 Apr 2019 14:23:10 +0200 Subject: [PATCH 060/634] writeLockFile(): Emit empty objects rather than null --- src/libexpr/primops/flake.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 32874f87b..f65ae09ea 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -114,9 +114,10 @@ void writeLockFile(LockFile lockFile, Path path) { nlohmann::json json; json["version"] = 1; - json["nonFlakeRequires"]; + json["nonFlakeRequires"] = nlohmann::json::object(); for (auto & x : lockFile.nonFlakeEntries) json["nonFlakeRequires"][x.first]["uri"] = x.second.to_string(); + json["requires"] = nlohmann::json::object(); for (auto & x : lockFile.flakeEntries) json["requires"][x.first] = flakeEntryToJson(x.second); createDirs(dirOf(path)); From 7b312a8762988ff7a8e0f0890fcd2406cd89c1a3 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 16 Apr 2019 14:27:54 +0200 Subject: [PATCH 061/634] Pass stuff by reference --- src/libexpr/primops/flake.cc | 17 +++++++++-------- src/libexpr/primops/flake.hh | 7 ++++--- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index f65ae09ea..3d11d9ec4 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -34,7 +34,7 @@ std::shared_ptr readRegistry(const Path & path) } /* Write a registry to a file. 
*/ -void writeRegistry(const FlakeRegistry & registry, Path path) +void writeRegistry(const FlakeRegistry & registry, const Path & path) { nlohmann::json json; json["version"] = 1; @@ -99,7 +99,7 @@ LockFile readLockFile(const Path & path) return lockFile; } -nlohmann::json flakeEntryToJson(LockFile::FlakeEntry & entry) +nlohmann::json flakeEntryToJson(const LockFile::FlakeEntry & entry) { nlohmann::json json; json["uri"] = entry.ref.to_string(); @@ -110,7 +110,7 @@ nlohmann::json flakeEntryToJson(LockFile::FlakeEntry & entry) return json; } -void writeLockFile(LockFile lockFile, Path path) +void writeLockFile(const LockFile & lockFile, const Path & path) { nlohmann::json json; json["version"] = 1; @@ -156,7 +156,8 @@ const std::vector> EvalState::getFlakeRegistries( } static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, - std::vector> registries, std::vector pastSearches = {}) + const std::vector> & registries, + std::vector pastSearches = {}) { for (std::shared_ptr registry : registries) { auto i = registry->entries.find(flakeRef); @@ -362,14 +363,14 @@ Dependencies resolveFlake(EvalState & state, const FlakeRef & topRef, bool impur return deps; } -LockFile::FlakeEntry dependenciesToFlakeEntry(Dependencies & deps) +LockFile::FlakeEntry dependenciesToFlakeEntry(const Dependencies & deps) { LockFile::FlakeEntry entry(deps.flake.ref); - for (Dependencies & deps : deps.flakeDeps) + for (auto & deps : deps.flakeDeps) entry.flakeEntries.insert_or_assign(deps.flake.id, dependenciesToFlakeEntry(deps)); - for (NonFlake & nonFlake : deps.nonFlakeDeps) + for (auto & nonFlake : deps.nonFlakeDeps) entry.nonFlakeEntries.insert_or_assign(nonFlake.alias, nonFlake.ref); return entry; @@ -385,7 +386,7 @@ LockFile getLockFile(EvalState & evalState, FlakeRef & flakeRef) return lockFile; } -void updateLockFile(EvalState & state, Path path) +void updateLockFile(EvalState & state, const Path & path) { // 'path' is the path to the local flake repo. FlakeRef flakeRef = FlakeRef("file://" + path); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 73446c908..347dd2077 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -33,7 +33,7 @@ void makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, bool impureTop std::shared_ptr readRegistry(const Path &); -void writeRegistry(const FlakeRegistry &, Path); +void writeRegistry(const FlakeRegistry &, const Path &); struct Flake { @@ -75,7 +75,8 @@ struct Dependencies Dependencies resolveFlake(EvalState &, const FlakeRef &, bool impureTopRef, bool isTopFlake = true); -FlakeRegistry updateLockFile(EvalState &, Flake &); +FlakeRegistry updateLockFile(EvalState &, const Flake &); + +void updateLockFile(EvalState &, const Path & path); -void updateLockFile(EvalState &, Path path); } From 60834492aea935b8043cdc8ccbc1270edebbc20a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 16 Apr 2019 15:02:02 +0200 Subject: [PATCH 062/634] Update lock files from InstallableFlake::toValue() This ensures that the lock file is updated *before* evaluating it, and that it gets updated for any nix command, not just 'nix build'. Also, while computing the lock file, allow arbitrary registry lookups, not just at top-level. Also, improve some error messages slightly. 
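
The update only applies to local, path-style flake references, and can be
suppressed with the --no-update flag introduced below. Hypothetical
invocation (placeholders, not a real URI):

  $ nix build --no-update <flake-uri>:<attribute>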
--- src/libexpr/primops/flake.cc | 42 +++++++++++++++++++----------------- src/libexpr/primops/flake.hh | 8 +++---- src/nix/build.cc | 14 ------------ src/nix/command.hh | 7 ++---- src/nix/flake.cc | 2 +- src/nix/installables.cc | 20 +++++++++-------- 6 files changed, 40 insertions(+), 53 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 3d11d9ec4..37dadd474 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -159,6 +159,9 @@ static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const std::vector> & registries, std::vector pastSearches = {}) { + if (registries.empty() && !flakeRef.isDirect()) + throw Error("indirect flake reference '%s' is not allowed", flakeRef.to_string()); + for (std::shared_ptr registry : registries) { auto i = registry->entries.find(flakeRef); if (i != registry->entries.end()) { @@ -178,8 +181,10 @@ static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, return lookupFlake(state, newRef, registries, pastSearches); } } + if (!flakeRef.isDirect()) - throw Error("indirect flake URI '%s' is the result of a lookup", flakeRef.to_string()); + throw Error("could not resolve flake reference '%s'", flakeRef.to_string()); + return flakeRef; } @@ -192,7 +197,8 @@ struct FlakeSourceInfo static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef flakeRef, bool impureIsAllowed = false) { - FlakeRef fRef = lookupFlake(state, flakeRef, state.getFlakeRegistries()); + FlakeRef fRef = lookupFlake(state, flakeRef, + impureIsAllowed ? state.getFlakeRegistries() : std::vector>()); // This only downloads only one revision of the repo, not the entire history. if (auto refData = std::get_if(&fRef.data)) { @@ -349,16 +355,18 @@ NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeAlias al dependencies. FIXME: this should return a graph of flakes. */ -Dependencies resolveFlake(EvalState & state, const FlakeRef & topRef, bool impureTopRef, bool isTopFlake) +Dependencies resolveFlake(EvalState & state, const FlakeRef & topRef, + RegistryAccess registryAccess, bool isTopFlake) { - Flake flake = getFlake(state, topRef, isTopFlake && impureTopRef); + Flake flake = getFlake(state, topRef, + registryAccess == AllowRegistry || (registryAccess == AllowRegistryAtTop && isTopFlake)); Dependencies deps(flake); for (auto & nonFlakeInfo : flake.nonFlakeRequires) deps.nonFlakeDeps.push_back(getNonFlake(state, nonFlakeInfo.second, nonFlakeInfo.first)); for (auto & newFlakeRef : flake.requires) - deps.flakeDeps.push_back(resolveFlake(state, newFlakeRef, false)); + deps.flakeDeps.push_back(resolveFlake(state, newFlakeRef, registryAccess, false)); return deps; } @@ -376,9 +384,9 @@ LockFile::FlakeEntry dependenciesToFlakeEntry(const Dependencies & deps) return entry; } -LockFile getLockFile(EvalState & evalState, FlakeRef & flakeRef) +static LockFile makeLockFile(EvalState & evalState, FlakeRef & flakeRef) { - Dependencies deps = resolveFlake(evalState, flakeRef, true); + Dependencies deps = resolveFlake(evalState, flakeRef, AllowRegistry); LockFile::FlakeEntry entry = dependenciesToFlakeEntry(deps); LockFile lockFile; lockFile.flakeEntries = entry.flakeEntries; @@ -388,16 +396,9 @@ LockFile getLockFile(EvalState & evalState, FlakeRef & flakeRef) void updateLockFile(EvalState & state, const Path & path) { - // 'path' is the path to the local flake repo. 
- FlakeRef flakeRef = FlakeRef("file://" + path); - if (std::get_if(&flakeRef.data)) { - LockFile lockFile = getLockFile(state, flakeRef); - writeLockFile(lockFile, path + "/flake.lock"); - } else if (std::get_if(&flakeRef.data)) { - throw UsageError("you can only update local flakes, not flakes on GitHub"); - } else { - throw UsageError("you can only update local flakes, not flakes through their FlakeAlias"); - } + FlakeRef flakeRef = FlakeRef("file://" + path); // FIXME: ugly + auto lockFile = makeLockFile(state, flakeRef); + writeLockFile(lockFile, path + "/flake.lock"); } void callFlake(EvalState & state, const Dependencies & flake, Value & v) @@ -436,15 +437,16 @@ void callFlake(EvalState & state, const Dependencies & flake, Value & v) // Return the `provides` of the top flake, while assigning to `v` the provides // of the dependencies as well. -void makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, bool impureTopRef, Value & v) +void makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, RegistryAccess registryAccess, Value & v) { - callFlake(state, resolveFlake(state, flakeRef, impureTopRef), v); + callFlake(state, resolveFlake(state, flakeRef, registryAccess), v); } // This function is exposed to be used in nix files. static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) { - makeFlakeValue(state, state.forceStringNoCtx(*args[0], pos), false, v); + makeFlakeValue(state, state.forceStringNoCtx(*args[0], pos), + evalSettings.pureEval ? DisallowRegistry : AllowRegistryAtTop, v); } static RegisterPrimOp r2("getFlake", 1, prim_getFlake); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 347dd2077..655d87f03 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -29,7 +29,9 @@ struct LockFile Path getUserRegistryPath(); -void makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, bool impureTopRef, Value & v); +enum RegistryAccess { DisallowRegistry, AllowRegistry, AllowRegistryAtTop }; + +void makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, RegistryAccess registryAccess, Value & v); std::shared_ptr readRegistry(const Path &); @@ -73,9 +75,7 @@ struct Dependencies Dependencies(const Flake & flake) : flake(flake) {} }; -Dependencies resolveFlake(EvalState &, const FlakeRef &, bool impureTopRef, bool isTopFlake = true); - -FlakeRegistry updateLockFile(EvalState &, const Flake &); +Dependencies resolveFlake(EvalState &, const FlakeRef &, RegistryAccess registryAccess, bool isTopFlake = true); void updateLockFile(EvalState &, const Path & path); diff --git a/src/nix/build.cc b/src/nix/build.cc index 9ef07dcdb..d6a6a8071 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -11,8 +11,6 @@ struct CmdBuild : MixDryRun, InstallablesCommand { Path outLink = "result"; - bool update = true; - CmdBuild() { mkFlag() @@ -26,11 +24,6 @@ struct CmdBuild : MixDryRun, InstallablesCommand .longName("no-link") .description("do not create a symlink to the build result") .set(&outLink, Path("")); - - mkFlag() - .longName("no-update") - .description("don't update the lock file") - .set(&update, false); } std::string name() override @@ -77,13 +70,6 @@ struct CmdBuild : MixDryRun, InstallablesCommand store2->addPermRoot(output.second, absPath(symlink), true); } } - - if (update) - for (auto installable : installables) { - auto flakeUri = installable->installableToFlakeUri(); - if (flakeUri) - updateLockFile(*evalState, *flakeUri); - } } }; diff --git a/src/nix/command.hh 
b/src/nix/command.hh index 5d0c0c82c..a52fbb9ba 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -66,11 +66,6 @@ struct Installable Buildable toBuildable(); - virtual std::optional installableToFlakeUri() - { - return std::nullopt; - } - virtual Value * toValue(EvalState & state) { throw Error("argument '%s' cannot be evaluated", what()); @@ -81,6 +76,8 @@ struct SourceExprCommand : virtual Args, StoreCommand, MixEvalArgs { std::optional file; + bool updateLockFile = true; + SourceExprCommand(); ref getEvalState(); diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 2079b1c27..1e03669c3 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -80,7 +80,7 @@ struct CmdFlakeDeps : FlakeCommand, MixJSON, StoreCommand, MixEvalArgs FlakeRef flakeRef(flakeUri); - Dependencies deps = resolveFlake(*evalState, flakeRef, true); + Dependencies deps = resolveFlake(*evalState, flakeRef, AllowRegistryAtTop); std::queue todo; todo.push(deps); diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 963321336..9d87c70c3 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -21,6 +21,11 @@ SourceExprCommand::SourceExprCommand() .label("file") .description("evaluate a set of attributes from FILE (deprecated)") .dest(&file); + + mkFlag() + .longName("no-update") + .description("don't create/update flake lock files") + .set(&updateLockFile, false); } ref SourceExprCommand::getEvalState() @@ -147,8 +152,13 @@ struct InstallableFlake : InstallableValue Value * toValue(EvalState & state) override { + auto path = std::get_if(&flakeRef.data); + if (cmd.updateLockFile && path) { + updateLockFile(state, path->path); + } + auto vFlake = state.allocValue(); - makeFlakeValue(state, flakeRef, true, *vFlake); + makeFlakeValue(state, flakeRef, AllowRegistryAtTop, *vFlake); auto vProvides = (*vFlake->attrs->get(state.symbols.create("provides")))->value; @@ -169,14 +179,6 @@ struct InstallableFlake : InstallableValue state.forceValue(*v); return v; } - - std::optional installableToFlakeUri() override - { - if (std::get_if(&flakeRef.data)) - return flakeRef.to_string(); - else - return std::nullopt; - } }; // FIXME: extend From cfca793a20862220c53094ee63523c9a09d2c9a3 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 16 Apr 2019 15:06:40 +0200 Subject: [PATCH 063/634] Remove unneeded pureEval flags --- src/nix/flake.cc | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 1e03669c3..93af71ac3 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -131,7 +131,7 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON, MixEvalArgs, StoreCommand return "list info about a given flake"; } - CmdFlakeInfo () { evalSettings.pureEval = false; /* FIXME */ } + CmdFlakeInfo () { } void run(nix::ref store) override { @@ -160,7 +160,6 @@ struct CmdFlakeAdd : MixEvalArgs, Command { expectArg("alias", &alias); expectArg("flake-uri", &uri); - evalSettings.pureEval = false; // FIXME } void run() override @@ -191,7 +190,6 @@ struct CmdFlakeRemove : virtual Args, MixEvalArgs, Command CmdFlakeRemove() { expectArg("alias", &alias); - evalSettings.pureEval = false; // FIXME } void run() override @@ -220,7 +218,6 @@ struct CmdFlakePin : virtual Args, StoreCommand, MixEvalArgs CmdFlakePin() { expectArg("alias", &alias); - evalSettings.pureEval = false; // FIXME } void run(nix::ref store) override From 8c4e759efd779e80d135516fdce2cf884a2e06f5 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 16 Apr 2019 15:11:17 +0200 Subject: [PATCH 064/634] 
updateLockFile(): Make sure Git can see flake.lock --- src/libexpr/primops/flake.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 37dadd474..13928d9d5 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -399,6 +399,10 @@ void updateLockFile(EvalState & state, const Path & path) FlakeRef flakeRef = FlakeRef("file://" + path); // FIXME: ugly auto lockFile = makeLockFile(state, flakeRef); writeLockFile(lockFile, path + "/flake.lock"); + + // Hack: Make sure that flake.lock is visible to Git. Otherwise, + // exportGit will fail to copy it to the Nix store. + runProgram("git", true, { "-C", path, "add", "flake.lock" }); } void callFlake(EvalState & state, const Dependencies & flake, Value & v) From 3d0e81051fca850fb7b46d6299a94566b8c1ab62 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 16 Apr 2019 15:40:58 +0200 Subject: [PATCH 065/634] Fix lock file generation Before: "requires": { "nixpkgs": { "uri": "nixpkgs" } }, After: "requires": { "nixpkgs": { "uri": "github:edolstra/nixpkgs/f10e8a02eb7fa2b4a070f30cf87f4efcc7f3186d" } }, --- src/libexpr/primops/flake.cc | 53 ++++++++++++++++----------------- src/libexpr/primops/flake.hh | 18 +++++++---- src/libexpr/primops/flakeref.cc | 2 +- src/nix/flake.cc | 4 +-- 4 files changed, 42 insertions(+), 35 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 13928d9d5..23406327e 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -188,13 +188,6 @@ static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, return flakeRef; } -struct FlakeSourceInfo -{ - Path storePath; - std::optional rev; - std::optional revCount; -}; - static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef flakeRef, bool impureIsAllowed = false) { FlakeRef fRef = lookupFlake(state, flakeRef, @@ -226,9 +219,11 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef flakeRef, bo if (result.etag->size() != 42 || (*result.etag)[0] != '"' || (*result.etag)[41] != '"') throw Error("ETag header '%s' from '%s' is not a Git revision", *result.etag, url); - FlakeSourceInfo info; + FlakeSourceInfo info(fRef); info.storePath = result.path; info.rev = Hash(std::string(*result.etag, 1, result.etag->size() - 2), htSHA1); + info.flakeRef.rev = info.rev; + info.flakeRef.ref = {}; return info; } @@ -237,10 +232,12 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef flakeRef, bo else if (auto refData = std::get_if(&fRef.data)) { auto gitInfo = exportGit(state.store, refData->uri, fRef.ref, fRef.rev ? fRef.rev->to_string(Base16, false) : "", "source"); - FlakeSourceInfo info; + FlakeSourceInfo info(fRef); info.storePath = gitInfo.storePath; info.rev = Hash(gitInfo.rev, htSHA1); info.revCount = gitInfo.revCount; + info.flakeRef.rev = info.rev; + // FIXME: ensure info.flakeRef.ref is set. 
return info; } @@ -248,10 +245,11 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef flakeRef, bo if (!pathExists(refData->path + "/.git")) throw Error("flake '%s' does not reference a Git repository", refData->path); auto gitInfo = exportGit(state.store, refData->path, {}, "", "source"); - FlakeSourceInfo info; + FlakeSourceInfo info(fRef); info.storePath = gitInfo.storePath; info.rev = Hash(gitInfo.rev, htSHA1); info.revCount = gitInfo.revCount; + info.flakeRef.rev = info.rev; return info; } @@ -265,24 +263,21 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe debug("got flake source '%s' with revision %s", sourceInfo.storePath, sourceInfo.rev.value_or(Hash(htSHA1)).to_string(Base16, false)); - auto flakePath = sourceInfo.storePath; - state.store->assertStorePath(flakePath); + state.store->assertStorePath(sourceInfo.storePath); if (state.allowedPaths) - state.allowedPaths->insert(flakePath); + state.allowedPaths->insert(sourceInfo.storePath); - Flake flake(flakeRef); + Flake flake(flakeRef, std::move(sourceInfo)); if (std::get_if(&flakeRef.data)) { - if (sourceInfo.rev) + // FIXME: ehm? + if (flake.sourceInfo.rev) flake.ref = FlakeRef(flakeRef.baseRef().to_string() - + "/" + sourceInfo.rev->to_string(Base16, false)); + + "/" + flake.sourceInfo.rev->to_string(Base16, false)); } - flake.path = flakePath; - flake.revCount = sourceInfo.revCount; - Value vInfo; - state.evalFile(flakePath + "/flake.nix", vInfo); // FIXME: symlink attack + state.evalFile(sourceInfo.storePath + "/flake.nix", vInfo); // FIXME: symlink attack state.forceAttrs(vInfo); @@ -317,7 +312,7 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe } else throw Error("flake lacks attribute 'provides'"); - const Path lockFile = flakePath + "/flake.lock"; // FIXME: symlink attack + Path lockFile = sourceInfo.storePath + "/flake.lock"; // FIXME: symlink attack flake.lockFile = readLockFile(lockFile); @@ -373,7 +368,7 @@ Dependencies resolveFlake(EvalState & state, const FlakeRef & topRef, LockFile::FlakeEntry dependenciesToFlakeEntry(const Dependencies & deps) { - LockFile::FlakeEntry entry(deps.flake.ref); + LockFile::FlakeEntry entry(deps.flake.sourceInfo.flakeRef); for (auto & deps : deps.flakeDeps) entry.flakeEntries.insert_or_assign(deps.flake.id, dependenciesToFlakeEntry(deps)); @@ -396,7 +391,10 @@ static LockFile makeLockFile(EvalState & evalState, FlakeRef & flakeRef) void updateLockFile(EvalState & state, const Path & path) { - FlakeRef flakeRef = FlakeRef("file://" + path); // FIXME: ugly + // FIXME: don't copy 'path' to the store (especially since we + // dirty it immediately afterwards). 
+ + FlakeRef flakeRef = FlakeRef(path); // FIXME: ugly auto lockFile = makeLockFile(state, flakeRef); writeLockFile(lockFile, path + "/flake.lock"); @@ -427,11 +425,12 @@ void callFlake(EvalState & state, const Dependencies & flake, Value & v) mkString(*state.allocAttr(v, state.sDescription), flake.flake.description); - state.store->isValidPath(flake.flake.path); - mkString(*state.allocAttr(v, state.sOutPath), flake.flake.path, {flake.flake.path}); + auto & path = flake.flake.sourceInfo.storePath; + state.store->isValidPath(path); + mkString(*state.allocAttr(v, state.sOutPath), path, {path}); - if (flake.flake.revCount) - mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *flake.flake.revCount); + if (flake.flake.sourceInfo.revCount) + mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *flake.flake.sourceInfo.revCount); auto vProvides = state.allocAttr(v, state.symbols.create("provides")); mkApp(*vProvides, *flake.flake.vProvides, v); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 655d87f03..f93796660 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -37,20 +37,28 @@ std::shared_ptr readRegistry(const Path &); void writeRegistry(const FlakeRegistry &, const Path &); +struct FlakeSourceInfo +{ + FlakeRef flakeRef; + Path storePath; + std::optional rev; + std::optional revCount; + // date + FlakeSourceInfo(const FlakeRef & flakeRef) : flakeRef(flakeRef) { } +}; + struct Flake { FlakeId id; FlakeRef ref; std::string description; - Path path; - std::optional revCount; + FlakeSourceInfo sourceInfo; std::vector requires; LockFile lockFile; std::map nonFlakeRequires; Value * vProvides; // FIXME: gc - // date - // content hash - Flake(const FlakeRef flakeRef) : ref(flakeRef) {}; + Flake(const FlakeRef & flakeRef, FlakeSourceInfo && sourceInfo) + : ref(flakeRef), sourceInfo(sourceInfo) {}; }; struct NonFlake diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index 274552218..4127e63cc 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -123,7 +123,7 @@ std::string FlakeRef::to_string() const string = refData->alias; else if (auto refData = std::get_if(&data)) { - assert(!ref || !rev); + assert(!(ref && rev)); string = "github:" + refData->owner + "/" + refData->repo; } diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 93af71ac3..7006ab989 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -40,13 +40,13 @@ void printFlakeInfo(Flake & flake, bool json) { if (json) { nlohmann::json j; j["id"] = flake.id; - j["location"] = flake.path; + j["location"] = flake.sourceInfo.storePath; j["description"] = flake.description; std::cout << j.dump(4) << std::endl; } else { std::cout << "ID: " << flake.id << "\n"; std::cout << "Description: " << flake.description << "\n"; - std::cout << "Location: " << flake.path << "\n"; + std::cout << "Location: " << flake.sourceInfo.storePath << "\n"; } } From 54ca4b4e81cd814d7727382bce073f6c6ea0ddf8 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 16 Apr 2019 15:47:15 +0200 Subject: [PATCH 066/634] Add flake lockfile --- flake.lock | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 flake.lock diff --git a/flake.lock b/flake.lock new file mode 100644 index 000000000..3154006c8 --- /dev/null +++ b/flake.lock @@ -0,0 +1,9 @@ +{ + "nonFlakeRequires": {}, + "requires": { + "nixpkgs": { + "uri": "github:edolstra/nixpkgs/f10e8a02eb7fa2b4a070f30cf87f4efcc7f3186d" + } + }, + "version": 1 +} \ No newline at end of 
file From 3c28cb1b8ff586421ed9e37cef383af0486445cb Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 16 Apr 2019 16:18:03 +0200 Subject: [PATCH 067/634] Improve 'nix flake info' a bit Example: $ nix flake info dwarffs ID: dwarffs URI: github:edolstra/dwarffs/a83d182fe3fe528ed6366a5cec3458bcb1a5f6e1 Description: A filesystem that fetches DWARF debug info from the Internet on demand Revision: a83d182fe3fe528ed6366a5cec3458bcb1a5f6e1 Path: /nix/store/grgd14kxxk8q4n503j87mpz48gcqpqw7-source --- src/nix/flake.cc | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 7006ab989..4b8f1026e 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -40,13 +40,19 @@ void printFlakeInfo(Flake & flake, bool json) { if (json) { nlohmann::json j; j["id"] = flake.id; - j["location"] = flake.sourceInfo.storePath; + j["uri"] = flake.sourceInfo.flakeRef.to_string(); j["description"] = flake.description; + if (flake.sourceInfo.rev) + j["revision"] = flake.sourceInfo.rev->to_string(Base16, false); + j["path"] = flake.sourceInfo.storePath; std::cout << j.dump(4) << std::endl; } else { std::cout << "ID: " << flake.id << "\n"; + std::cout << "URI: " << flake.sourceInfo.flakeRef.to_string() << "\n"; std::cout << "Description: " << flake.description << "\n"; - std::cout << "Location: " << flake.sourceInfo.storePath << "\n"; + if (flake.sourceInfo.rev) + std::cout << "Revision: " << flake.sourceInfo.rev->to_string(Base16, false) << "\n"; + std::cout << "Path: " << flake.sourceInfo.storePath << "\n"; } } From 260527a90ccc23461cdc4ad73970dd4e0b2e5239 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 16 Apr 2019 16:18:47 +0200 Subject: [PATCH 068/634] Use the lock file --- src/libexpr/primops/flake.cc | 20 +++++++++++++------- src/libexpr/primops/flake.hh | 5 ++--- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 23406327e..193b521a3 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -106,7 +106,7 @@ nlohmann::json flakeEntryToJson(const LockFile::FlakeEntry & entry) for (auto & x : entry.nonFlakeEntries) json["nonFlakeRequires"][x.first]["uri"] = x.second.to_string(); for (auto & x : entry.flakeEntries) - json["requires"][x.first] = flakeEntryToJson(x.second); + json["requires"][x.first.to_string()] = flakeEntryToJson(x.second); return json; } @@ -119,7 +119,7 @@ void writeLockFile(const LockFile & lockFile, const Path & path) json["nonFlakeRequires"][x.first]["uri"] = x.second.to_string(); json["requires"] = nlohmann::json::object(); for (auto & x : lockFile.flakeEntries) - json["requires"][x.first] = flakeEntryToJson(x.second); + json["requires"][x.first.to_string()] = flakeEntryToJson(x.second); createDirs(dirOf(path)); writeFile(path, json.dump(4)); // '4' = indentation in json file } @@ -312,10 +312,6 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe } else throw Error("flake lacks attribute 'provides'"); - Path lockFile = sourceInfo.storePath + "/flake.lock"; // FIXME: symlink attack - - flake.lockFile = readLockFile(lockFile); - return flake; } @@ -355,13 +351,23 @@ Dependencies resolveFlake(EvalState & state, const FlakeRef & topRef, { Flake flake = getFlake(state, topRef, registryAccess == AllowRegistry || (registryAccess == AllowRegistryAtTop && isTopFlake)); + + LockFile lockFile; + + if (isTopFlake) + lockFile = readLockFile(flake.sourceInfo.storePath + "/flake.lock"); // FIXME: symlink 
attack + Dependencies deps(flake); for (auto & nonFlakeInfo : flake.nonFlakeRequires) deps.nonFlakeDeps.push_back(getNonFlake(state, nonFlakeInfo.second, nonFlakeInfo.first)); - for (auto & newFlakeRef : flake.requires) + for (auto newFlakeRef : flake.requires) { + auto i = lockFile.flakeEntries.find(newFlakeRef); + if (i != lockFile.flakeEntries.end()) newFlakeRef = i->second.ref; + // FIXME: propagate lockFile downwards deps.flakeDeps.push_back(resolveFlake(state, newFlakeRef, registryAccess, false)); + } return deps; } diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index f93796660..85f4fdf9f 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -18,12 +18,12 @@ struct LockFile struct FlakeEntry { FlakeRef ref; - std::map flakeEntries; + std::map flakeEntries; std::map nonFlakeEntries; FlakeEntry(const FlakeRef & flakeRef) : ref(flakeRef) {}; }; - std::map flakeEntries; + std::map flakeEntries; std::map nonFlakeEntries; }; @@ -54,7 +54,6 @@ struct Flake std::string description; FlakeSourceInfo sourceInfo; std::vector requires; - LockFile lockFile; std::map nonFlakeRequires; Value * vProvides; // FIXME: gc Flake(const FlakeRef & flakeRef, FlakeSourceInfo && sourceInfo) From 3ddb6d1833a94e3d141116f3e579e66ebef04111 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 16 Apr 2019 16:24:51 +0200 Subject: [PATCH 069/634] Allow refs to start with a digit E.g. we want to accept "19.03" as a ref. --- src/libexpr/primops/flakeref.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index 4127e63cc..973987469 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -5,7 +5,7 @@ namespace nix { // A Git ref (i.e. branch or tag name). -const static std::string refRegex = "[a-zA-Z][a-zA-Z0-9_.-]*"; // FIXME: check +const static std::string refRegex = "[a-zA-Z0-9][a-zA-Z0-9_.-]*"; // FIXME: check // A Git revision (a SHA-1 commit hash). const static std::string revRegexS = "[0-9a-fA-F]{40}"; From 939bee06cd7c68af1508fab127202689fc63c22e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 16 Apr 2019 16:29:44 +0200 Subject: [PATCH 070/634] Pass a flake to itself as "self" --- flake.nix | 10 ++++++---- src/libexpr/primops/flake.cc | 2 ++ 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/flake.nix b/flake.nix index 695f67fa4..95ec5d952 100644 --- a/flake.nix +++ b/flake.nix @@ -3,13 +3,15 @@ description = "The purely functional package manager"; - requires = [ flake:nixpkgs ]; + epoch = 2019; - provides = flakes: rec { + requires = [ "nixpkgs" ]; + + provides = deps: rec { hydraJobs = import ./release.nix { - nix = flakes.nix; # => flakes.self? 
- nixpkgs = flakes.nixpkgs; + nix = deps.self; + nixpkgs = deps.nixpkgs; }; packages.nix = hydraJobs.build.x86_64-linux; diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 193b521a3..296db3f92 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -441,6 +441,8 @@ void callFlake(EvalState & state, const Dependencies & flake, Value & v) auto vProvides = state.allocAttr(v, state.symbols.create("provides")); mkApp(*vProvides, *flake.flake.vProvides, v); + v.attrs->push_back(Attr(state.symbols.create("self"), &v)); + v.attrs->sort(); } From b42ba08fc8a291c549c1f9f92457d72639fac995 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Thu, 21 Mar 2019 09:30:16 +0100 Subject: [PATCH 071/634] Add command `flake clone` --- src/libexpr/primops/flake.cc | 43 +++++++++++++++++++++++++++++---- src/libexpr/primops/flake.hh | 3 +++ src/libexpr/primops/flakeref.hh | 1 + src/libutil/util.cc | 7 +++++- src/nix/flake.cc | 29 ++++++++++++++++++++++ 5 files changed, 77 insertions(+), 6 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 296db3f92..0e4b8afee 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -146,17 +146,19 @@ std::shared_ptr getFlagRegistry() return std::make_shared(); } -const std::vector> EvalState::getFlakeRegistries() +// This always returns a vector with globalReg, userReg, localReg, flakeReg. +// If one of them doesn't exist, the registry is left empty but does exist. +const Registries EvalState::getFlakeRegistries() { - std::vector> registries; - registries.push_back(getGlobalRegistry()); + Registries registries; + registries.push_back(getGlobalRegistry()); // TODO (Nick): Doesn't this break immutability? registries.push_back(getUserRegistry()); + registries.push_back(std::make_shared()); // local registries.push_back(getFlagRegistry()); return registries; } -static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, - const std::vector> & registries, +static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const Registries & registries, std::vector pastSearches = {}) { if (registries.empty() && !flakeRef.isDirect()) @@ -462,4 +464,35 @@ static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Va static RegisterPrimOp r2("getFlake", 1, prim_getFlake); +void gitCloneFlake (std::string flakeUri, EvalState & state, Registries registries, + Path endDirectory) +{ + FlakeRef flakeRef(flakeUri); + flakeRef = lookupFlake(state, flakeRef, registries); + + std::string uri; + + Strings args = {"clone"}; + + if (auto refData = std::get_if(&flakeRef.data)) { + uri = "git@github.com:" + refData->owner + "/" + refData->repo + ".git"; + args.push_back(uri); + if (flakeRef.ref) { + args.push_back("--branch"); + args.push_back(*flakeRef.ref); + } + } else if (auto refData = std::get_if(&flakeRef.data)) { + args.push_back(refData->uri); + if (flakeRef.ref) { + args.push_back("--branch"); + args.push_back(*flakeRef.ref); + } + } + + if (endDirectory != "") + args.push_back(endDirectory); + + runProgram("git", true, args); +} + } diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 85f4fdf9f..76219fbd6 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -27,6 +27,8 @@ struct LockFile std::map nonFlakeEntries; }; +typedef std::vector> Registries; + Path getUserRegistryPath(); enum RegistryAccess { DisallowRegistry, AllowRegistry, AllowRegistryAtTop }; @@ -86,4 +88,5 @@ 
Dependencies resolveFlake(EvalState &, const FlakeRef &, RegistryAccess registry void updateLockFile(EvalState &, const Path & path); +void gitCloneFlake (std::string flakeUri, EvalState &, Registries, Path); } diff --git a/src/libexpr/primops/flakeref.hh b/src/libexpr/primops/flakeref.hh index d789a6f70..cf9d7a1a6 100644 --- a/src/libexpr/primops/flakeref.hh +++ b/src/libexpr/primops/flakeref.hh @@ -67,6 +67,7 @@ namespace nix { https://example.org/my/repo.git https://example.org/my/repo.git?ref=release-1.2.3 https://example.org/my/repo.git?rev=e72daba8250068216d79d2aeef40d4d95aff6666 + git://github.com/edolstra/dwarffs.git\?ref=flake\&rev=2efca4bc9da70fb001b26c3dc858c6397d3c4817 * /path.git(\?attr(&attr)*)? diff --git a/src/libutil/util.cc b/src/libutil/util.cc index b0a2b853e..f4f86c5c8 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -962,12 +962,14 @@ std::vector stringsToCharPtrs(const Strings & ss) return res; } - +// Output = "standard out" output stream string runProgram(Path program, bool searchPath, const Strings & args, const std::optional & input) { RunOptions opts(program, args); opts.searchPath = searchPath; + // This allows you to refer to a program with a pathname relative to the + // PATH variable. opts.input = input; auto res = runProgram(opts); @@ -978,6 +980,7 @@ string runProgram(Path program, bool searchPath, const Strings & args, return res.second; } +// Output = error code + "standard out" output stream std::pair runProgram(const RunOptions & options_) { RunOptions options(options_); @@ -1028,6 +1031,8 @@ void runProgram2(const RunOptions & options) if (options.searchPath) execvp(options.program.c_str(), stringsToCharPtrs(args_).data()); + // This allows you to refer to a program with a pathname relative + // to the PATH variable. else execv(options.program.c_str(), stringsToCharPtrs(args_).data()); diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 4b8f1026e..35324295d 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -280,6 +280,34 @@ struct CmdFlakeInit : virtual Args, Command } }; +struct CmdFlakeClone : StoreCommand, FlakeCommand, MixEvalArgs +{ + Path endDirectory = ""; + + std::string name() override + { + return "clone"; + } + + std::string description() override + { + return "clone flake repository"; + } + + CmdFlakeClone() + { + expectArg("end-dir", &endDirectory, true); + } + + void run(nix::ref store) override + { + auto evalState = std::make_shared(searchPath, store); + + Registries registries = evalState->getFlakeRegistries(); + gitCloneFlake(flakeUri, *evalState, registries, endDirectory); + } +}; + struct CmdFlake : virtual MultiCommand, virtual Command { CmdFlake() @@ -291,6 +319,7 @@ struct CmdFlake : virtual MultiCommand, virtual Command , make_ref() , make_ref() , make_ref() + , make_ref() }) { } From 160b974fb0623df436c9e834f6d4db62dfda02d2 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 17 Apr 2019 13:54:06 +0200 Subject: [PATCH 072/634] Fix mutability check --- src/libexpr/primops/flake.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 296db3f92..0bf666a98 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -193,10 +193,11 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef flakeRef, bo FlakeRef fRef = lookupFlake(state, flakeRef, impureIsAllowed ? 
state.getFlakeRegistries() : std::vector>()); + if (evalSettings.pureEval && !impureIsAllowed && !fRef.isImmutable()) + throw Error("requested to fetch mutable flake '%s' in pure mode", fRef.to_string()); + // This only downloads only one revision of the repo, not the entire history. if (auto refData = std::get_if(&fRef.data)) { - if (evalSettings.pureEval && !impureIsAllowed && !fRef.isImmutable()) - throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", fRef.to_string()); // FIXME: use regular /archive URLs instead? api.github.com // might have stricter rate limits. From 6e4210d8ce76f52d9fd717660ea24b98ba780843 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 19 Apr 2019 10:58:08 +0200 Subject: [PATCH 073/634] Fix assertion failure --- src/libexpr/primops/flake.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 0bf666a98..2415faf5e 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -415,7 +415,7 @@ void callFlake(EvalState & state, const Dependencies & flake, Value & v) // Construct the resulting attrset '{description, provides, // ...}'. This attrset is passed lazily as an argument to 'provides'. - state.mkAttrs(v, flake.flakeDeps.size() + flake.nonFlakeDeps.size() + 4); + state.mkAttrs(v, flake.flakeDeps.size() + flake.nonFlakeDeps.size() + 8); for (auto & dep : flake.flakeDeps) { auto vFlake = state.allocAttr(v, dep.flake.id); From 46cb15df9b3501ca631779fa7d5c6299c1c17b53 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 19 Apr 2019 11:16:14 +0200 Subject: [PATCH 074/634] Fix assertion failure in FlakeRef::to_string() --- src/libexpr/primops/fetchGit.cc | 3 ++- src/libexpr/primops/fetchGit.hh | 1 + src/libexpr/primops/flake.cc | 2 +- src/libexpr/primops/flakeref.cc | 2 +- 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index 391308224..40975d8d8 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -39,6 +39,7 @@ GitInfo exportGit(ref store, const std::string & uri, files. */ GitInfo gitInfo; + gitInfo.ref = "HEAD"; gitInfo.rev = "0000000000000000000000000000000000000000"; gitInfo.shortRev = std::string(gitInfo.rev, 0, 7); @@ -67,7 +68,6 @@ GitInfo exportGit(ref store, const std::string & uri, // clean working tree, but no ref or rev specified. Use 'HEAD'. rev = chomp(runProgram("git", true, { "-C", uri, "rev-parse", "HEAD" })); - ref = "HEAD"s; } if (!ref) ref = "HEAD"s; @@ -127,6 +127,7 @@ GitInfo exportGit(ref store, const std::string & uri, // FIXME: check whether rev is an ancestor of ref. GitInfo gitInfo; + gitInfo.ref = *ref; gitInfo.rev = rev != "" ? 
rev : chomp(readFile(localRefFile)); gitInfo.shortRev = std::string(gitInfo.rev, 0, 7); diff --git a/src/libexpr/primops/fetchGit.hh b/src/libexpr/primops/fetchGit.hh index 60c439426..5937bdcc0 100644 --- a/src/libexpr/primops/fetchGit.hh +++ b/src/libexpr/primops/fetchGit.hh @@ -9,6 +9,7 @@ namespace nix { struct GitInfo { Path storePath; + std::string ref; std::string rev; std::string shortRev; std::optional revCount; diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 2415faf5e..1149efaac 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -237,8 +237,8 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef flakeRef, bo info.storePath = gitInfo.storePath; info.rev = Hash(gitInfo.rev, htSHA1); info.revCount = gitInfo.revCount; + info.flakeRef.ref = gitInfo.ref; info.flakeRef.rev = info.rev; - // FIXME: ensure info.flakeRef.ref is set. return info; } diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index 973987469..97f31377a 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -128,7 +128,7 @@ std::string FlakeRef::to_string() const } else if (auto refData = std::get_if(&data)) { - assert(ref || !rev); + assert(!rev || ref); string = refData->uri; } From 6960ee929dcf95c24e0db761fd4bc46c3749abb2 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 19 Apr 2019 11:34:23 +0200 Subject: [PATCH 075/634] Clean up exportGit argument handling --- src/libexpr/primops/fetchGit.cc | 42 ++++++++++++++++----------------- src/libexpr/primops/fetchGit.hh | 6 ++--- src/libexpr/primops/flake.cc | 10 ++++---- src/libutil/hash.hh | 12 ++++++++++ 4 files changed, 40 insertions(+), 30 deletions(-) diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index 40975d8d8..3a6830cb7 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -19,10 +19,13 @@ namespace nix { extern std::regex revRegex; GitInfo exportGit(ref store, const std::string & uri, - std::optional ref, std::string rev, + std::optional ref, + std::optional rev, const std::string & name) { - if (!ref && rev == "" && hasPrefix(uri, "/") && pathExists(uri + "/.git")) { + assert(!rev || rev->type == htSHA1); + + if (!ref && !rev && hasPrefix(uri, "/") && pathExists(uri + "/.git")) { bool clean = true; @@ -40,8 +43,6 @@ GitInfo exportGit(ref store, const std::string & uri, GitInfo gitInfo; gitInfo.ref = "HEAD"; - gitInfo.rev = "0000000000000000000000000000000000000000"; - gitInfo.shortRev = std::string(gitInfo.rev, 0, 7); auto files = tokenizeString>( runProgram("git", true, { "-C", uri, "ls-files", "-z" }), "\0"s); @@ -67,14 +68,11 @@ GitInfo exportGit(ref store, const std::string & uri, } // clean working tree, but no ref or rev specified. Use 'HEAD'. - rev = chomp(runProgram("git", true, { "-C", uri, "rev-parse", "HEAD" })); + rev = Hash(chomp(runProgram("git", true, { "-C", uri, "rev-parse", "HEAD" })), htSHA1); } if (!ref) ref = "HEAD"s; - if (rev != "" && !std::regex_match(rev, revRegex)) - throw Error("invalid Git revision '%s'", rev); - deletePath(getCacheDir() + "/nix/git"); Path cacheDir = getCacheDir() + "/nix/gitv2/" + hashString(htSHA256, uri).to_string(Base32, false); @@ -90,9 +88,9 @@ GitInfo exportGit(ref store, const std::string & uri, time_t now = time(0); /* If a rev was specified, we need to fetch if it's not in the repo. 
*/ - if (rev != "") { + if (rev) { try { - runProgram("git", true, { "-C", cacheDir, "cat-file", "-e", rev }); + runProgram("git", true, { "-C", cacheDir, "cat-file", "-e", rev->gitRev() }); doFetch = false; } catch (ExecError & e) { if (WIFEXITED(e.status)) { @@ -128,19 +126,19 @@ GitInfo exportGit(ref store, const std::string & uri, // FIXME: check whether rev is an ancestor of ref. GitInfo gitInfo; gitInfo.ref = *ref; - gitInfo.rev = rev != "" ? rev : chomp(readFile(localRefFile)); - gitInfo.shortRev = std::string(gitInfo.rev, 0, 7); + gitInfo.rev = rev ? *rev : Hash(chomp(readFile(localRefFile)), htSHA1); printTalkative("using revision %s of repo '%s'", gitInfo.rev, uri); - std::string storeLinkName = hashString(htSHA512, name + std::string("\0"s) + gitInfo.rev).to_string(Base32, false); + std::string storeLinkName = hashString(htSHA512, + name + std::string("\0"s) + gitInfo.rev.gitRev()).to_string(Base32, false); Path storeLink = cacheDir + "/" + storeLinkName + ".link"; PathLocks storeLinkLock({storeLink}, fmt("waiting for lock on '%1%'...", storeLink)); // FIXME: broken try { auto json = nlohmann::json::parse(readFile(storeLink)); - assert(json["name"] == name && json["rev"] == gitInfo.rev); + assert(json["name"] == name && Hash((std::string) json["rev"], htSHA1) == gitInfo.rev); gitInfo.storePath = json["storePath"]; @@ -155,7 +153,7 @@ GitInfo exportGit(ref store, const std::string & uri, // FIXME: should pipe this, or find some better way to extract a // revision. - auto tar = runProgram("git", true, { "-C", cacheDir, "archive", gitInfo.rev }); + auto tar = runProgram("git", true, { "-C", cacheDir, "archive", gitInfo.rev.gitRev() }); Path tmpDir = createTempDir(); AutoDelete delTmpDir(tmpDir, true); @@ -164,13 +162,13 @@ GitInfo exportGit(ref store, const std::string & uri, gitInfo.storePath = store->addToStore(name, tmpDir); - gitInfo.revCount = std::stoull(runProgram("git", true, { "-C", cacheDir, "rev-list", "--count", gitInfo.rev })); + gitInfo.revCount = std::stoull(runProgram("git", true, { "-C", cacheDir, "rev-list", "--count", gitInfo.rev.gitRev() })); nlohmann::json json; json["storePath"] = gitInfo.storePath; json["uri"] = uri; json["name"] = name; - json["rev"] = gitInfo.rev; + json["rev"] = gitInfo.rev.gitRev(); json["revCount"] = *gitInfo.revCount; writeFile(storeLink, json.dump()); @@ -182,7 +180,7 @@ static void prim_fetchGit(EvalState & state, const Pos & pos, Value * * args, Va { std::string url; std::optional ref; - std::string rev; + std::optional rev; std::string name = "source"; PathSet context; @@ -199,7 +197,7 @@ static void prim_fetchGit(EvalState & state, const Pos & pos, Value * * args, Va else if (n == "ref") ref = state.forceStringNoCtx(*attr.value, *attr.pos); else if (n == "rev") - rev = state.forceStringNoCtx(*attr.value, *attr.pos); + rev = Hash(state.forceStringNoCtx(*attr.value, *attr.pos), htSHA1); else if (n == "name") name = state.forceStringNoCtx(*attr.value, *attr.pos); else @@ -216,15 +214,15 @@ static void prim_fetchGit(EvalState & state, const Pos & pos, Value * * args, Va // whitelist. Ah well. 
state.checkURI(url); - if (evalSettings.pureEval && rev == "") + if (evalSettings.pureEval && !rev) throw Error("in pure evaluation mode, 'fetchGit' requires a Git revision"); auto gitInfo = exportGit(state.store, url, ref, rev, name); state.mkAttrs(v, 8); mkString(*state.allocAttr(v, state.sOutPath), gitInfo.storePath, PathSet({gitInfo.storePath})); - mkString(*state.allocAttr(v, state.symbols.create("rev")), gitInfo.rev); - mkString(*state.allocAttr(v, state.symbols.create("shortRev")), gitInfo.shortRev); + mkString(*state.allocAttr(v, state.symbols.create("rev")), gitInfo.rev.gitRev()); + mkString(*state.allocAttr(v, state.symbols.create("shortRev")), gitInfo.rev.gitShortRev()); mkInt(*state.allocAttr(v, state.symbols.create("revCount")), gitInfo.revCount.value_or(0)); v.attrs->sort(); diff --git a/src/libexpr/primops/fetchGit.hh b/src/libexpr/primops/fetchGit.hh index 5937bdcc0..a867f38f6 100644 --- a/src/libexpr/primops/fetchGit.hh +++ b/src/libexpr/primops/fetchGit.hh @@ -10,13 +10,13 @@ struct GitInfo { Path storePath; std::string ref; - std::string rev; - std::string shortRev; + Hash rev{htSHA1}; std::optional revCount; }; GitInfo exportGit(ref store, const std::string & uri, - std::optional ref, std::string rev, + std::optional ref, + std::optional rev, const std::string & name); } diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 1149efaac..c5e646412 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -231,11 +231,10 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef flakeRef, bo // This downloads the entire git history else if (auto refData = std::get_if(&fRef.data)) { - auto gitInfo = exportGit(state.store, refData->uri, fRef.ref, - fRef.rev ? fRef.rev->to_string(Base16, false) : "", "source"); + auto gitInfo = exportGit(state.store, refData->uri, fRef.ref, fRef.rev, "source"); FlakeSourceInfo info(fRef); info.storePath = gitInfo.storePath; - info.rev = Hash(gitInfo.rev, htSHA1); + info.rev = gitInfo.rev; info.revCount = gitInfo.revCount; info.flakeRef.ref = gitInfo.ref; info.flakeRef.rev = info.rev; @@ -245,11 +244,12 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef flakeRef, bo else if (auto refData = std::get_if(&fRef.data)) { if (!pathExists(refData->path + "/.git")) throw Error("flake '%s' does not reference a Git repository", refData->path); - auto gitInfo = exportGit(state.store, refData->path, {}, "", "source"); + auto gitInfo = exportGit(state.store, refData->path, {}, {}, "source"); FlakeSourceInfo info(fRef); info.storePath = gitInfo.storePath; - info.rev = Hash(gitInfo.rev, htSHA1); + info.rev = gitInfo.rev; info.revCount = gitInfo.revCount; + info.flakeRef.ref = gitInfo.ref; info.flakeRef.rev = info.rev; return info; } diff --git a/src/libutil/hash.hh b/src/libutil/hash.hh index 2dbc3b630..edede8ace 100644 --- a/src/libutil/hash.hh +++ b/src/libutil/hash.hh @@ -80,6 +80,18 @@ struct Hash or base-64. By default, this is prefixed by the hash type (e.g. "sha256:"). 
*/ std::string to_string(Base base = Base32, bool includeType = true) const; + + std::string gitRev() const + { + assert(type == htSHA1); + return to_string(Base16, false); + } + + std::string gitShortRev() const + { + assert(type == htSHA1); + return std::string(to_string(Base16, false), 0, 7); + } }; From 160ce18a0e9f569f94e6b0cb8e47bd4008a9fea2 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 19 Apr 2019 11:43:56 +0200 Subject: [PATCH 076/634] Improve missing flake.nix error message --- src/libexpr/primops/flake.cc | 18 +++++++++++------- src/libexpr/primops/flakeref.cc | 6 ++++++ src/libexpr/primops/flakeref.hh | 5 +++++ src/nix/flake.cc | 4 ++-- 4 files changed, 24 insertions(+), 9 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index c5e646412..720e157c6 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -48,7 +48,7 @@ LockFile::FlakeEntry readFlakeEntry(nlohmann::json json) { FlakeRef flakeRef(json["uri"]); if (!flakeRef.isImmutable()) - throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef.to_string()); + throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef); LockFile::FlakeEntry entry(flakeRef); @@ -57,7 +57,7 @@ LockFile::FlakeEntry readFlakeEntry(nlohmann::json json) for (auto i = nonFlakeRequires.begin(); i != nonFlakeRequires.end(); ++i) { FlakeRef flakeRef(i->value("uri", "")); if (!flakeRef.isImmutable()) - throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef.to_string()); + throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef); entry.nonFlakeEntries.insert_or_assign(i.key(), flakeRef); } @@ -87,7 +87,7 @@ LockFile readLockFile(const Path & path) for (auto i = nonFlakeRequires.begin(); i != nonFlakeRequires.end(); ++i) { FlakeRef flakeRef(i->value("uri", "")); if (!flakeRef.isImmutable()) - throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef.to_string()); + throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef); lockFile.nonFlakeEntries.insert_or_assign(i.key(), flakeRef); } @@ -160,7 +160,7 @@ static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, std::vector pastSearches = {}) { if (registries.empty() && !flakeRef.isDirect()) - throw Error("indirect flake reference '%s' is not allowed", flakeRef.to_string()); + throw Error("indirect flake reference '%s' is not allowed", flakeRef); for (std::shared_ptr registry : registries) { auto i = registry->entries.find(flakeRef); @@ -183,7 +183,7 @@ static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, } if (!flakeRef.isDirect()) - throw Error("could not resolve flake reference '%s'", flakeRef.to_string()); + throw Error("could not resolve flake reference '%s'", flakeRef); return flakeRef; } @@ -194,7 +194,7 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef flakeRef, bo impureIsAllowed ? state.getFlakeRegistries() : std::vector>()); if (evalSettings.pureEval && !impureIsAllowed && !fRef.isImmutable()) - throw Error("requested to fetch mutable flake '%s' in pure mode", fRef.to_string()); + throw Error("requested to fetch mutable flake '%s' in pure mode", fRef); // This only downloads only one revision of the repo, not the entire history. 
if (auto refData = std::get_if(&fRef.data)) { @@ -277,8 +277,12 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe + "/" + flake.sourceInfo.rev->to_string(Base16, false)); } + Path flakeFile = sourceInfo.storePath + "/flake.nix"; + if (!pathExists(flakeFile)) + throw Error("source tree referenced by '%s' does not contain a 'flake.nix' file", flakeRef); + Value vInfo; - state.evalFile(sourceInfo.storePath + "/flake.nix", vInfo); // FIXME: symlink attack + state.evalFile(flakeFile, vInfo); // FIXME: symlink attack state.forceAttrs(vInfo); diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index 97f31377a..b91bbee2a 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -142,6 +142,12 @@ std::string FlakeRef::to_string() const return string; } +std::ostream & operator << (std::ostream & str, const FlakeRef & flakeRef) +{ + str << flakeRef.to_string(); + return str; +} + bool FlakeRef::isImmutable() const { return (bool) rev; diff --git a/src/libexpr/primops/flakeref.hh b/src/libexpr/primops/flakeref.hh index d789a6f70..e599e2feb 100644 --- a/src/libexpr/primops/flakeref.hh +++ b/src/libexpr/primops/flakeref.hh @@ -1,3 +1,5 @@ +#pragma once + #include "types.hh" #include "hash.hh" @@ -173,4 +175,7 @@ struct FlakeRef FlakeRef baseRef() const; }; + +std::ostream & operator << (std::ostream & str, const FlakeRef & flakeRef); + } diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 4b8f1026e..34d67ee58 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -32,7 +32,7 @@ struct CmdFlakeList : StoreCommand, MixEvalArgs for (auto & registry : registries) for (auto & entry : registry->entries) - std::cout << entry.first.to_string() << " " << entry.second.to_string() << "\n"; + std::cout << entry.first << " " << entry.second << "\n"; } }; @@ -48,7 +48,7 @@ void printFlakeInfo(Flake & flake, bool json) { std::cout << j.dump(4) << std::endl; } else { std::cout << "ID: " << flake.id << "\n"; - std::cout << "URI: " << flake.sourceInfo.flakeRef.to_string() << "\n"; + std::cout << "URI: " << flake.sourceInfo.flakeRef << "\n"; std::cout << "Description: " << flake.description << "\n"; if (flake.sourceInfo.rev) std::cout << "Revision: " << flake.sourceInfo.rev->to_string(Base16, false) << "\n"; From 0cbda84f5b14aba0416cb65f88f8e9d487895207 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 19 Apr 2019 14:06:27 +0200 Subject: [PATCH 077/634] exportGit: Don't clone local repositories This ensures that commands like 'nix flake info /my/nixpkgs' don't copy a gigabyte of crap to ~/.cache/nix. Fixes #60. 
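A rough way to observe the intended effect (the flake path is the example quoted above and the
cache location comes from the diff below, so treat both as illustrative rather than exact):

  $ nix flake info /my/nixpkgs    # reads the local checkout in place
  $ ls ~/.cache/nix/gitv2         # no mirror of the local repository should appear here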
--- src/libexpr/primops/fetchGit.cc | 129 +++++++++++++++--------- src/libexpr/primops/fetchGit.hh | 2 +- src/libstore/http-binary-cache-store.cc | 5 +- tests/binary-cache.sh | 4 +- tests/fetchGit.sh | 2 + 5 files changed, 88 insertions(+), 54 deletions(-) diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index 3a6830cb7..eb95208de 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -18,14 +18,19 @@ namespace nix { extern std::regex revRegex; -GitInfo exportGit(ref store, const std::string & uri, +GitInfo exportGit(ref store, std::string uri, std::optional ref, std::optional rev, const std::string & name) { assert(!rev || rev->type == htSHA1); - if (!ref && !rev && hasPrefix(uri, "/") && pathExists(uri + "/.git")) { + bool isLocal = hasPrefix(uri, "/") && pathExists(uri + "/.git"); + + // If this is a local directory (but not a file:// URI) and no ref + // or revision is given, then allow the use of an unclean working + // tree. + if (!ref && !rev && isLocal) { bool clean = true; @@ -66,67 +71,92 @@ GitInfo exportGit(ref store, const std::string & uri, return gitInfo; } - - // clean working tree, but no ref or rev specified. Use 'HEAD'. - rev = Hash(chomp(runProgram("git", true, { "-C", uri, "rev-parse", "HEAD" })), htSHA1); } - if (!ref) ref = "HEAD"s; + if (!ref) ref = isLocal ? "HEAD" : "master"; + + // Don't clone file:// URIs (but otherwise treat them the same as + // remote URIs, i.e. don't use the working tree or HEAD). + static bool forceHttp = getEnv("_NIX_FORCE_HTTP") == "1"; // for testing + if (!forceHttp && hasPrefix(uri, "file://")) { + uri = std::string(uri, 7); + isLocal = true; + } deletePath(getCacheDir() + "/nix/git"); Path cacheDir = getCacheDir() + "/nix/gitv2/" + hashString(htSHA256, uri).to_string(Base32, false); + Path repoDir; - if (!pathExists(cacheDir)) { - createDirs(dirOf(cacheDir)); - runProgram("git", true, { "init", "--bare", cacheDir }); - } + if (isLocal) { - Path localRefFile = cacheDir + "/refs/heads/" + *ref; + if (!rev) + rev = Hash(chomp(runProgram("git", true, { "-C", uri, "rev-parse", *ref })), htSHA1); + + if (!pathExists(cacheDir)) + createDirs(cacheDir); + + repoDir = uri; - bool doFetch; - time_t now = time(0); - /* If a rev was specified, we need to fetch if it's not in the - repo. */ - if (rev) { - try { - runProgram("git", true, { "-C", cacheDir, "cat-file", "-e", rev->gitRev() }); - doFetch = false; - } catch (ExecError & e) { - if (WIFEXITED(e.status)) { - doFetch = true; - } else { - throw; - } - } } else { - /* If the local ref is older than ‘tarball-ttl’ seconds, do a - git fetch to update the local ref to the remote ref. */ - struct stat st; - doFetch = stat(localRefFile.c_str(), &st) != 0 || - st.st_mtime + settings.tarballTtl <= now; - } - if (doFetch) - { - Activity act(*logger, lvlTalkative, actUnknown, fmt("fetching Git repository '%s'", uri)); - // FIXME: git stderr messes up our progress indicator, so - // we're using --quiet for now. Should process its stderr. 
- runProgram("git", true, { "-C", cacheDir, "fetch", "--quiet", "--force", "--", uri, fmt("%s:%s", *ref, *ref) }); + repoDir = cacheDir; - struct timeval times[2]; - times[0].tv_sec = now; - times[0].tv_usec = 0; - times[1].tv_sec = now; - times[1].tv_usec = 0; + if (!pathExists(cacheDir)) { + createDirs(dirOf(cacheDir)); + runProgram("git", true, { "init", "--bare", repoDir }); + } - utimes(localRefFile.c_str(), times); + Path localRefFile = repoDir + "/refs/heads/" + *ref; + + bool doFetch; + time_t now = time(0); + + /* If a rev was specified, we need to fetch if it's not in the + repo. */ + if (rev) { + try { + runProgram("git", true, { "-C", repoDir, "cat-file", "-e", rev->gitRev() }); + doFetch = false; + } catch (ExecError & e) { + if (WIFEXITED(e.status)) { + doFetch = true; + } else { + throw; + } + } + } else { + /* If the local ref is older than ‘tarball-ttl’ seconds, do a + git fetch to update the local ref to the remote ref. */ + struct stat st; + doFetch = stat(localRefFile.c_str(), &st) != 0 || + st.st_mtime + settings.tarballTtl <= now; + } + + if (doFetch) { + Activity act(*logger, lvlTalkative, actUnknown, fmt("fetching Git repository '%s'", uri)); + + // FIXME: git stderr messes up our progress indicator, so + // we're using --quiet for now. Should process its stderr. + runProgram("git", true, { "-C", repoDir, "fetch", "--quiet", "--force", "--", uri, fmt("%s:%s", *ref, *ref) }); + + struct timeval times[2]; + times[0].tv_sec = now; + times[0].tv_usec = 0; + times[1].tv_sec = now; + times[1].tv_usec = 0; + + utimes(localRefFile.c_str(), times); + } + + if (!rev) + rev = Hash(chomp(readFile(localRefFile)), htSHA1); } // FIXME: check whether rev is an ancestor of ref. GitInfo gitInfo; gitInfo.ref = *ref; - gitInfo.rev = rev ? *rev : Hash(chomp(readFile(localRefFile)), htSHA1); + gitInfo.rev = *rev; printTalkative("using revision %s of repo '%s'", gitInfo.rev, uri); @@ -140,9 +170,10 @@ GitInfo exportGit(ref store, const std::string & uri, assert(json["name"] == name && Hash((std::string) json["rev"], htSHA1) == gitInfo.rev); - gitInfo.storePath = json["storePath"]; + Path storePath = json["storePath"]; - if (store->isValidPath(gitInfo.storePath)) { + if (store->isValidPath(storePath)) { + gitInfo.storePath = storePath; gitInfo.revCount = json["revCount"]; return gitInfo; } @@ -153,7 +184,7 @@ GitInfo exportGit(ref store, const std::string & uri, // FIXME: should pipe this, or find some better way to extract a // revision. 
- auto tar = runProgram("git", true, { "-C", cacheDir, "archive", gitInfo.rev.gitRev() }); + auto tar = runProgram("git", true, { "-C", repoDir, "archive", gitInfo.rev.gitRev() }); Path tmpDir = createTempDir(); AutoDelete delTmpDir(tmpDir, true); @@ -162,7 +193,7 @@ GitInfo exportGit(ref store, const std::string & uri, gitInfo.storePath = store->addToStore(name, tmpDir); - gitInfo.revCount = std::stoull(runProgram("git", true, { "-C", cacheDir, "rev-list", "--count", gitInfo.rev.gitRev() })); + gitInfo.revCount = std::stoull(runProgram("git", true, { "-C", repoDir, "rev-list", "--count", gitInfo.rev.gitRev() })); nlohmann::json json; json["storePath"] = gitInfo.storePath; diff --git a/src/libexpr/primops/fetchGit.hh b/src/libexpr/primops/fetchGit.hh index a867f38f6..32e748f98 100644 --- a/src/libexpr/primops/fetchGit.hh +++ b/src/libexpr/primops/fetchGit.hh @@ -14,7 +14,7 @@ struct GitInfo std::optional revCount; }; -GitInfo exportGit(ref store, const std::string & uri, +GitInfo exportGit(ref store, std::string uri, std::optional ref, std::optional rev, const std::string & name); diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index 8da0e2f9d..105e1dcdd 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -160,10 +160,11 @@ static RegisterStoreImplementation regStore([]( const std::string & uri, const Store::Params & params) -> std::shared_ptr { + static bool forceHttp = getEnv("_NIX_FORCE_HTTP") == "1"; if (std::string(uri, 0, 7) != "http://" && std::string(uri, 0, 8) != "https://" && - (getEnv("_NIX_FORCE_HTTP_BINARY_CACHE_STORE") != "1" || std::string(uri, 0, 7) != "file://") - ) return 0; + (!forceHttp || std::string(uri, 0, 7) != "file://")) + return 0; auto store = std::make_shared(params, uri); store->init(); return store; diff --git a/tests/binary-cache.sh b/tests/binary-cache.sh index eb58ae7c1..a3c3c7847 100644 --- a/tests/binary-cache.sh +++ b/tests/binary-cache.sh @@ -48,7 +48,7 @@ basicTests # Test HttpBinaryCacheStore. -export _NIX_FORCE_HTTP_BINARY_CACHE_STORE=1 +export _NIX_FORCE_HTTP=1 basicTests @@ -126,7 +126,7 @@ badKey="$(cat $TEST_ROOT/pk2)" res=($(nix-store --generate-binary-cache-key foo.nixos.org-1 $TEST_ROOT/sk3 $TEST_ROOT/pk3)) otherKey="$(cat $TEST_ROOT/pk3)" -_NIX_FORCE_HTTP_BINARY_CACHE_STORE= nix copy --to file://$cacheDir?secret-key=$TEST_ROOT/sk1 $outPath +_NIX_FORCE_HTTP= nix copy --to file://$cacheDir?secret-key=$TEST_ROOT/sk1 $outPath # Downloading should fail if we don't provide a key. 
diff --git a/tests/fetchGit.sh b/tests/fetchGit.sh index 51fd49e9f..d87ce8560 100644 --- a/tests/fetchGit.sh +++ b/tests/fetchGit.sh @@ -9,6 +9,8 @@ clearStore repo=$TEST_ROOT/git +export _NIX_FORCE_HTTP=1 + rm -rf $repo ${repo}-tmp $TEST_HOME/.cache/nix/gitv2 git init $repo From bc259192b4e1f90c575ddc83814b82cca829a4f8 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 19 Apr 2019 14:15:51 +0200 Subject: [PATCH 078/634] fetchGit: Return revCount for dirty working trees --- src/libexpr/primops/fetchGit.cc | 5 +++-- src/libexpr/primops/fetchGit.hh | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index eb95208de..e79eacafe 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -68,6 +68,7 @@ GitInfo exportGit(ref store, std::string uri, }; gitInfo.storePath = store->addToStore("source", uri, true, htSHA256, filter); + gitInfo.revCount = std::stoull(runProgram("git", true, { "-C", uri, "rev-list", "--count", "HEAD" })); return gitInfo; } @@ -200,7 +201,7 @@ GitInfo exportGit(ref store, std::string uri, json["uri"] = uri; json["name"] = name; json["rev"] = gitInfo.rev.gitRev(); - json["revCount"] = *gitInfo.revCount; + json["revCount"] = gitInfo.revCount; writeFile(storeLink, json.dump()); @@ -254,7 +255,7 @@ static void prim_fetchGit(EvalState & state, const Pos & pos, Value * * args, Va mkString(*state.allocAttr(v, state.sOutPath), gitInfo.storePath, PathSet({gitInfo.storePath})); mkString(*state.allocAttr(v, state.symbols.create("rev")), gitInfo.rev.gitRev()); mkString(*state.allocAttr(v, state.symbols.create("shortRev")), gitInfo.rev.gitShortRev()); - mkInt(*state.allocAttr(v, state.symbols.create("revCount")), gitInfo.revCount.value_or(0)); + mkInt(*state.allocAttr(v, state.symbols.create("revCount")), gitInfo.revCount); v.attrs->sort(); if (state.allowedPaths) diff --git a/src/libexpr/primops/fetchGit.hh b/src/libexpr/primops/fetchGit.hh index 32e748f98..2ad6a5e5c 100644 --- a/src/libexpr/primops/fetchGit.hh +++ b/src/libexpr/primops/fetchGit.hh @@ -11,7 +11,7 @@ struct GitInfo Path storePath; std::string ref; Hash rev{htSHA1}; - std::optional revCount; + uint64_t revCount; }; GitInfo exportGit(ref store, std::string uri, From 50ec2bed9edd234eabbd4a3920052ca2f94bca52 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 19 Apr 2019 14:19:46 +0200 Subject: [PATCH 079/634] nix flake info: Show revcount --- src/nix/flake.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 34d67ee58..eec280584 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -44,6 +44,8 @@ void printFlakeInfo(Flake & flake, bool json) { j["description"] = flake.description; if (flake.sourceInfo.rev) j["revision"] = flake.sourceInfo.rev->to_string(Base16, false); + if (flake.sourceInfo.revCount) + j["revCount"] = *flake.sourceInfo.revCount; j["path"] = flake.sourceInfo.storePath; std::cout << j.dump(4) << std::endl; } else { @@ -52,6 +54,8 @@ void printFlakeInfo(Flake & flake, bool json) { std::cout << "Description: " << flake.description << "\n"; if (flake.sourceInfo.rev) std::cout << "Revision: " << flake.sourceInfo.rev->to_string(Base16, false) << "\n"; + if (flake.sourceInfo.revCount) + std::cout << "Revcount: " << *flake.sourceInfo.revCount << "\n"; std::cout << "Path: " << flake.sourceInfo.storePath << "\n"; } } From e51abb6631ff0f5fc52523ea1819333cb587170c Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Fri, 19 Apr 2019 14:23:35 +0200 
Subject: [PATCH 080/634] Changed some names --- src/libexpr/primops/flake.cc | 46 ++++++++++++++++++------------------ src/libexpr/primops/flake.hh | 8 +++---- src/nix/flake.cc | 14 +++++------ 3 files changed, 34 insertions(+), 34 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 720e157c6..c098168de 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -351,7 +351,7 @@ NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeAlias al dependencies. FIXME: this should return a graph of flakes. */ -Dependencies resolveFlake(EvalState & state, const FlakeRef & topRef, +ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, RegistryAccess registryAccess, bool isTopFlake) { Flake flake = getFlake(state, topRef, @@ -362,7 +362,7 @@ Dependencies resolveFlake(EvalState & state, const FlakeRef & topRef, if (isTopFlake) lockFile = readLockFile(flake.sourceInfo.storePath + "/flake.lock"); // FIXME: symlink attack - Dependencies deps(flake); + ResolvedFlake deps(flake); for (auto & nonFlakeInfo : flake.nonFlakeRequires) deps.nonFlakeDeps.push_back(getNonFlake(state, nonFlakeInfo.second, nonFlakeInfo.first)); @@ -377,14 +377,14 @@ Dependencies resolveFlake(EvalState & state, const FlakeRef & topRef, return deps; } -LockFile::FlakeEntry dependenciesToFlakeEntry(const Dependencies & deps) +LockFile::FlakeEntry dependenciesToFlakeEntry(const ResolvedFlake & resolvedFlake) { - LockFile::FlakeEntry entry(deps.flake.sourceInfo.flakeRef); + LockFile::FlakeEntry entry(resolvedFlake.flake.sourceInfo.flakeRef); - for (auto & deps : deps.flakeDeps) - entry.flakeEntries.insert_or_assign(deps.flake.id, dependenciesToFlakeEntry(deps)); + for (auto & newResFlake : resolvedFlake.flakeDeps) + entry.flakeEntries.insert_or_assign(newResFlake.flake.id, dependenciesToFlakeEntry(newResFlake)); - for (auto & nonFlake : deps.nonFlakeDeps) + for (auto & nonFlake : resolvedFlake.nonFlakeDeps) entry.nonFlakeEntries.insert_or_assign(nonFlake.alias, nonFlake.ref); return entry; @@ -392,8 +392,8 @@ LockFile::FlakeEntry dependenciesToFlakeEntry(const Dependencies & deps) static LockFile makeLockFile(EvalState & evalState, FlakeRef & flakeRef) { - Dependencies deps = resolveFlake(evalState, flakeRef, AllowRegistry); - LockFile::FlakeEntry entry = dependenciesToFlakeEntry(deps); + ResolvedFlake resFlake = resolveFlake(evalState, flakeRef, AllowRegistry); + LockFile::FlakeEntry entry = dependenciesToFlakeEntry(resFlake); LockFile lockFile; lockFile.flakeEntries = entry.flakeEntries; lockFile.nonFlakeEntries = entry.nonFlakeEntries; @@ -414,37 +414,37 @@ void updateLockFile(EvalState & state, const Path & path) runProgram("git", true, { "-C", path, "add", "flake.lock" }); } -void callFlake(EvalState & state, const Dependencies & flake, Value & v) +void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) { // Construct the resulting attrset '{description, provides, // ...}'. This attrset is passed lazily as an argument to 'provides'. 
- state.mkAttrs(v, flake.flakeDeps.size() + flake.nonFlakeDeps.size() + 8); + state.mkAttrs(v, resFlake.flakeDeps.size() + resFlake.nonFlakeDeps.size() + 8); - for (auto & dep : flake.flakeDeps) { - auto vFlake = state.allocAttr(v, dep.flake.id); - callFlake(state, dep, *vFlake); + for (const ResolvedFlake newResFlake : resFlake.flakeDeps) { + auto vFlake = state.allocAttr(v, newResFlake.flake.id); + callFlake(state, newResFlake, *vFlake); } - for (auto & dep : flake.nonFlakeDeps) { - auto vNonFlake = state.allocAttr(v, dep.alias); + for (const NonFlake nonFlake : resFlake.nonFlakeDeps) { + auto vNonFlake = state.allocAttr(v, nonFlake.alias); state.mkAttrs(*vNonFlake, 4); - state.store->isValidPath(dep.path); - mkString(*state.allocAttr(*vNonFlake, state.sOutPath), dep.path, {dep.path}); + state.store->isValidPath(nonFlake.path); + mkString(*state.allocAttr(*vNonFlake, state.sOutPath), nonFlake.path, {nonFlake.path}); } - mkString(*state.allocAttr(v, state.sDescription), flake.flake.description); + mkString(*state.allocAttr(v, state.sDescription), resFlake.flake.description); - auto & path = flake.flake.sourceInfo.storePath; + auto & path = resFlake.flake.sourceInfo.storePath; state.store->isValidPath(path); mkString(*state.allocAttr(v, state.sOutPath), path, {path}); - if (flake.flake.sourceInfo.revCount) - mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *flake.flake.sourceInfo.revCount); + if (resFlake.flake.sourceInfo.revCount) + mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *resFlake.flake.sourceInfo.revCount); auto vProvides = state.allocAttr(v, state.symbols.create("provides")); - mkApp(*vProvides, *flake.flake.vProvides, v); + mkApp(*vProvides, *resFlake.flake.vProvides, v); v.attrs->push_back(Attr(state.symbols.create("self"), &v)); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 85f4fdf9f..0b70088cc 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -74,15 +74,15 @@ std::shared_ptr getGlobalRegistry(); Flake getFlake(EvalState &, const FlakeRef &, bool impureIsAllowed); -struct Dependencies +struct ResolvedFlake { Flake flake; - std::vector flakeDeps; // The flake dependencies + std::vector flakeDeps; // The flake dependencies std::vector nonFlakeDeps; - Dependencies(const Flake & flake) : flake(flake) {} + ResolvedFlake(const Flake & flake) : flake(flake) {} }; -Dependencies resolveFlake(EvalState &, const FlakeRef &, RegistryAccess registryAccess, bool isTopFlake = true); +ResolvedFlake resolveFlake(EvalState &, const FlakeRef &, RegistryAccess registryAccess, bool isTopFlake = true); void updateLockFile(EvalState &, const Path & path); diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 34d67ee58..07e9e313a 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -86,20 +86,20 @@ struct CmdFlakeDeps : FlakeCommand, MixJSON, StoreCommand, MixEvalArgs FlakeRef flakeRef(flakeUri); - Dependencies deps = resolveFlake(*evalState, flakeRef, AllowRegistryAtTop); + ResolvedFlake resFlake = resolveFlake(*evalState, flakeRef, AllowRegistryAtTop); - std::queue todo; - todo.push(deps); + std::queue todo; + todo.push(resFlake); while (!todo.empty()) { - deps = todo.front(); + resFlake = todo.front(); todo.pop(); - for (auto & nonFlake : deps.nonFlakeDeps) + for (NonFlake & nonFlake : resFlake.nonFlakeDeps) printNonFlakeInfo(nonFlake, json); - for (auto & newDeps : deps.flakeDeps) - todo.push(newDeps); + for (ResolvedFlake & newResFlake : resFlake.flakeDeps) + todo.push(newResFlake); } } }; From 
3392f1b77869269580b58e4931b7a79f44799ce0 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 19 Apr 2019 14:41:06 +0200 Subject: [PATCH 081/634] Shut up clang warning --- src/nix/main.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nix/main.cc b/src/nix/main.cc index 01b0866f2..3ec5f48d5 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -57,7 +57,7 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs "--help-config' for a list of configuration settings.\n"; } - void printHelp(const string & programName, std::ostream & out) + void printHelp(const string & programName, std::ostream & out) override { MultiCommand::printHelp(programName, out); From cbfdea685764bf66443a999e672656c54289b8c9 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 19 Apr 2019 14:41:59 +0200 Subject: [PATCH 082/634] fetchGit -> fetchTarball --- release.nix | 2 +- shell.nix | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/release.nix b/release.nix index f52120474..a47ca862f 100644 --- a/release.nix +++ b/release.nix @@ -1,5 +1,5 @@ { nix ? builtins.fetchGit ./. -, nixpkgs ? builtins.fetchGit { url = https://github.com/NixOS/nixpkgs-channels.git; ref = "nixos-19.03"; } +, nixpkgs ? builtins.fetchTarball https://github.com/NixOS/nixpkgs-channels/archive/nixos-19.03.tar.gz , officialRelease ? false , systems ? [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ] }: diff --git a/shell.nix b/shell.nix index 73e75fb29..8167f87a2 100644 --- a/shell.nix +++ b/shell.nix @@ -1,6 +1,6 @@ { useClang ? false }: -with import (builtins.fetchGit { url = https://github.com/NixOS/nixpkgs-channels.git; ref = "nixos-19.03"; }) {}; +with import (builtins.fetchTarball https://github.com/NixOS/nixpkgs-channels/archive/nixos-19.03.tar.gz) {}; with import ./release-common.nix { inherit pkgs; }; From f8a52cc598ae9e13c6fe4f04f73e60e4e63a1975 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 19 Apr 2019 14:53:58 +0200 Subject: [PATCH 083/634] nlohmann-json: 3.5.0 -> 3.6.1 https://github.com/nlohmann/json/releases/tag/v3.6.1 This fixes some clang warnings. --- src/nlohmann/json.hpp | 14648 +++++++++++++++++++++------------------- 1 file changed, 7542 insertions(+), 7106 deletions(-) diff --git a/src/nlohmann/json.hpp b/src/nlohmann/json.hpp index c9af0bed3..5003a4fa2 100644 --- a/src/nlohmann/json.hpp +++ b/src/nlohmann/json.hpp @@ -1,12 +1,12 @@ /* __ _____ _____ _____ __| | __| | | | JSON for Modern C++ -| | |__ | | | | | | version 3.5.0 +| | |__ | | | | | | version 3.6.1 |_____|_____|_____|_|___| https://github.com/nlohmann/json Licensed under the MIT License . SPDX-License-Identifier: MIT -Copyright (c) 2013-2018 Niels Lohmann . +Copyright (c) 2013-2019 Niels Lohmann . Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -27,12 +27,12 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -#ifndef NLOHMANN_JSON_HPP -#define NLOHMANN_JSON_HPP +#ifndef INCLUDE_NLOHMANN_JSON_HPP_ +#define INCLUDE_NLOHMANN_JSON_HPP_ #define NLOHMANN_JSON_VERSION_MAJOR 3 -#define NLOHMANN_JSON_VERSION_MINOR 5 -#define NLOHMANN_JSON_VERSION_PATCH 0 +#define NLOHMANN_JSON_VERSION_MINOR 6 +#define NLOHMANN_JSON_VERSION_PATCH 1 #include // all_of, find, for_each #include // assert @@ -42,79 +42,417 @@ SOFTWARE. 
#include // initializer_list #include // istream, ostream #include // random_access_iterator_tag +#include // unique_ptr #include // accumulate #include // string, stoi, to_string #include // declval, forward, move, pair, swap - -// #include -#ifndef NLOHMANN_JSON_FWD_HPP -#define NLOHMANN_JSON_FWD_HPP - -#include // int64_t, uint64_t -#include // map -#include // allocator -#include // string #include // vector -/*! -@brief namespace for Niels Lohmann -@see https://github.com/nlohmann -@since version 1.0.0 -*/ +// #include + + +#include + +// #include + + +#include // transform +#include // array +#include // and, not +#include // forward_list +#include // inserter, front_inserter, end +#include // map +#include // string +#include // tuple, make_tuple +#include // is_arithmetic, is_same, is_enum, underlying_type, is_convertible +#include // unordered_map +#include // pair, declval +#include // valarray + +// #include + + +#include // exception +#include // runtime_error +#include // to_string + +// #include + + +#include // size_t + namespace nlohmann { -/*! -@brief default JSONSerializer template argument +namespace detail +{ +/// struct to capture the start position of the current token +struct position_t +{ + /// the total number of characters read + std::size_t chars_read_total = 0; + /// the number of characters read in the current line + std::size_t chars_read_current_line = 0; + /// the number of lines read + std::size_t lines_read = 0; -This serializer ignores the template arguments and uses ADL -([argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl)) -for serialization. -*/ -template -struct adl_serializer; + /// conversion to size_t to preserve SAX interface + constexpr operator size_t() const + { + return chars_read_total; + } +}; -template class ObjectType = - std::map, - template class ArrayType = std::vector, - class StringType = std::string, class BooleanType = bool, - class NumberIntegerType = std::int64_t, - class NumberUnsignedType = std::uint64_t, - class NumberFloatType = double, - template class AllocatorType = std::allocator, - template class JSONSerializer = - adl_serializer> -class basic_json; +} // namespace detail +} // namespace nlohmann + + +namespace nlohmann +{ +namespace detail +{ +//////////////// +// exceptions // +//////////////// /*! -@brief JSON Pointer +@brief general exception of the @ref basic_json class -A JSON pointer defines a string syntax for identifying a specific value -within a JSON document. It can be used with functions `at` and -`operator[]`. Furthermore, JSON pointers are the base for JSON patches. +This class is an extension of `std::exception` objects with a member @a id for +exception ids. It is used as the base class for all exceptions thrown by the +@ref basic_json class. This class can hence be used as "wildcard" to catch +exceptions. -@sa [RFC 6901](https://tools.ietf.org/html/rfc6901) +Subclasses: +- @ref parse_error for exceptions indicating a parse error +- @ref invalid_iterator for exceptions indicating errors with iterators +- @ref type_error for exceptions indicating executing a member function with + a wrong type +- @ref out_of_range for exceptions indicating access out of the defined range +- @ref other_error for exceptions indicating other library errors -@since version 2.0.0 +@internal +@note To have nothrow-copy-constructible exceptions, we internally use + `std::runtime_error` which can cope with arbitrary-length error messages. 
+ Intermediate strings are built with static functions and then passed to + the actual constructor. +@endinternal + +@liveexample{The following code shows how arbitrary library exceptions can be +caught.,exception} + +@since version 3.0.0 */ -template -class json_pointer; +class exception : public std::exception +{ + public: + /// returns the explanatory string + const char* what() const noexcept override + { + return m.what(); + } + + /// the id of the exception + const int id; + + protected: + exception(int id_, const char* what_arg) : id(id_), m(what_arg) {} + + static std::string name(const std::string& ename, int id_) + { + return "[json.exception." + ename + "." + std::to_string(id_) + "] "; + } + + private: + /// an exception object as storage for error messages + std::runtime_error m; +}; /*! -@brief default JSON class +@brief exception indicating a parse error -This type is the default specialization of the @ref basic_json class which -uses the standard template types. +This exception is thrown by the library when a parse error occurs. Parse errors +can occur during the deserialization of JSON text, CBOR, MessagePack, as well +as when using JSON Patch. -@since version 1.0.0 +Member @a byte holds the byte index of the last read character in the input +file. + +Exceptions have ids 1xx. + +name / id | example message | description +------------------------------ | --------------- | ------------------------- +json.exception.parse_error.101 | parse error at 2: unexpected end of input; expected string literal | This error indicates a syntax error while deserializing a JSON text. The error message describes that an unexpected token (character) was encountered, and the member @a byte indicates the error position. +json.exception.parse_error.102 | parse error at 14: missing or wrong low surrogate | JSON uses the `\uxxxx` format to describe Unicode characters. Code points above above 0xFFFF are split into two `\uxxxx` entries ("surrogate pairs"). This error indicates that the surrogate pair is incomplete or contains an invalid code point. +json.exception.parse_error.103 | parse error: code points above 0x10FFFF are invalid | Unicode supports code points up to 0x10FFFF. Code points above 0x10FFFF are invalid. +json.exception.parse_error.104 | parse error: JSON patch must be an array of objects | [RFC 6902](https://tools.ietf.org/html/rfc6902) requires a JSON Patch document to be a JSON document that represents an array of objects. +json.exception.parse_error.105 | parse error: operation must have string member 'op' | An operation of a JSON Patch document must contain exactly one "op" member, whose value indicates the operation to perform. Its value must be one of "add", "remove", "replace", "move", "copy", or "test"; other values are errors. +json.exception.parse_error.106 | parse error: array index '01' must not begin with '0' | An array index in a JSON Pointer ([RFC 6901](https://tools.ietf.org/html/rfc6901)) may be `0` or any number without a leading `0`. +json.exception.parse_error.107 | parse error: JSON pointer must be empty or begin with '/' - was: 'foo' | A JSON Pointer must be a Unicode string containing a sequence of zero or more reference tokens, each prefixed by a `/` character. +json.exception.parse_error.108 | parse error: escape character '~' must be followed with '0' or '1' | In a JSON Pointer, only `~0` and `~1` are valid escape sequences. +json.exception.parse_error.109 | parse error: array index 'one' is not a number | A JSON Pointer array index must be a number. 
+json.exception.parse_error.110 | parse error at 1: cannot read 2 bytes from vector | When parsing CBOR or MessagePack, the byte vector ends before the complete value has been read. +json.exception.parse_error.112 | parse error at 1: error reading CBOR; last byte: 0xF8 | Not all types of CBOR or MessagePack are supported. This exception occurs if an unsupported byte was read. +json.exception.parse_error.113 | parse error at 2: expected a CBOR string; last byte: 0x98 | While parsing a map key, a value that is not a string has been read. +json.exception.parse_error.114 | parse error: Unsupported BSON record type 0x0F | The parsing of the corresponding BSON record type is not implemented (yet). + +@note For an input with n bytes, 1 is the index of the first character and n+1 + is the index of the terminating null byte or the end of file. This also + holds true when reading a byte vector (CBOR or MessagePack). + +@liveexample{The following code shows how a `parse_error` exception can be +caught.,parse_error} + +@sa - @ref exception for the base class of the library exceptions +@sa - @ref invalid_iterator for exceptions indicating errors with iterators +@sa - @ref type_error for exceptions indicating executing a member function with + a wrong type +@sa - @ref out_of_range for exceptions indicating access out of the defined range +@sa - @ref other_error for exceptions indicating other library errors + +@since version 3.0.0 */ -using json = basic_json<>; +class parse_error : public exception +{ + public: + /*! + @brief create a parse error exception + @param[in] id_ the id of the exception + @param[in] pos the position where the error occurred (or with + chars_read_total=0 if the position cannot be + determined) + @param[in] what_arg the explanatory string + @return parse_error object + */ + static parse_error create(int id_, const position_t& pos, const std::string& what_arg) + { + std::string w = exception::name("parse_error", id_) + "parse error" + + position_string(pos) + ": " + what_arg; + return parse_error(id_, pos.chars_read_total, w.c_str()); + } + + static parse_error create(int id_, std::size_t byte_, const std::string& what_arg) + { + std::string w = exception::name("parse_error", id_) + "parse error" + + (byte_ != 0 ? (" at byte " + std::to_string(byte_)) : "") + + ": " + what_arg; + return parse_error(id_, byte_, w.c_str()); + } + + /*! + @brief byte index of the parse error + + The byte index of the last read character in the input file. + + @note For an input with n bytes, 1 is the index of the first character and + n+1 is the index of the terminating null byte or the end of file. + This also holds true when reading a byte vector (CBOR or MessagePack). + */ + const std::size_t byte; + + private: + parse_error(int id_, std::size_t byte_, const char* what_arg) + : exception(id_, what_arg), byte(byte_) {} + + static std::string position_string(const position_t& pos) + { + return " at line " + std::to_string(pos.lines_read + 1) + + ", column " + std::to_string(pos.chars_read_current_line); + } +}; + +/*! +@brief exception indicating errors with iterators + +This exception is thrown if iterators passed to a library function do not match +the expected semantics. + +Exceptions have ids 2xx. 
+ +name / id | example message | description +----------------------------------- | --------------- | ------------------------- +json.exception.invalid_iterator.201 | iterators are not compatible | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid. +json.exception.invalid_iterator.202 | iterator does not fit current value | In an erase or insert function, the passed iterator @a pos does not belong to the JSON value for which the function was called. It hence does not define a valid position for the deletion/insertion. +json.exception.invalid_iterator.203 | iterators do not fit current value | Either iterator passed to function @ref erase(IteratorType first, IteratorType last) does not belong to the JSON value from which values shall be erased. It hence does not define a valid range to delete values from. +json.exception.invalid_iterator.204 | iterators out of range | When an iterator range for a primitive type (number, boolean, or string) is passed to a constructor or an erase function, this range has to be exactly (@ref begin(), @ref end()), because this is the only way the single stored value is expressed. All other ranges are invalid. +json.exception.invalid_iterator.205 | iterator out of range | When an iterator for a primitive type (number, boolean, or string) is passed to an erase function, the iterator has to be the @ref begin() iterator, because it is the only way to address the stored value. All other iterators are invalid. +json.exception.invalid_iterator.206 | cannot construct with iterators from null | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) belong to a JSON null value and hence to not define a valid range. +json.exception.invalid_iterator.207 | cannot use key() for non-object iterators | The key() member function can only be used on iterators belonging to a JSON object, because other types do not have a concept of a key. +json.exception.invalid_iterator.208 | cannot use operator[] for object iterators | The operator[] to specify a concrete offset cannot be used on iterators belonging to a JSON object, because JSON objects are unordered. +json.exception.invalid_iterator.209 | cannot use offsets with object iterators | The offset operators (+, -, +=, -=) cannot be used on iterators belonging to a JSON object, because JSON objects are unordered. +json.exception.invalid_iterator.210 | iterators do not fit | The iterator range passed to the insert function are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid. +json.exception.invalid_iterator.211 | passed iterators may not belong to container | The iterator range passed to the insert function must not be a subrange of the container to insert to. +json.exception.invalid_iterator.212 | cannot compare iterators of different containers | When two iterators are compared, they must belong to the same container. +json.exception.invalid_iterator.213 | cannot compare order of object iterators | The order of object iterators cannot be compared, because JSON objects are unordered. +json.exception.invalid_iterator.214 | cannot get value | Cannot get value for iterator: Either the iterator belongs to a null value or it is an iterator to a primitive type (number, boolean, or string), but the iterator is different to @ref begin(). 
+ +@liveexample{The following code shows how an `invalid_iterator` exception can be +caught.,invalid_iterator} + +@sa - @ref exception for the base class of the library exceptions +@sa - @ref parse_error for exceptions indicating a parse error +@sa - @ref type_error for exceptions indicating executing a member function with + a wrong type +@sa - @ref out_of_range for exceptions indicating access out of the defined range +@sa - @ref other_error for exceptions indicating other library errors + +@since version 3.0.0 +*/ +class invalid_iterator : public exception +{ + public: + static invalid_iterator create(int id_, const std::string& what_arg) + { + std::string w = exception::name("invalid_iterator", id_) + what_arg; + return invalid_iterator(id_, w.c_str()); + } + + private: + invalid_iterator(int id_, const char* what_arg) + : exception(id_, what_arg) {} +}; + +/*! +@brief exception indicating executing a member function with a wrong type + +This exception is thrown in case of a type error; that is, a library function is +executed on a JSON value whose type does not match the expected semantics. + +Exceptions have ids 3xx. + +name / id | example message | description +----------------------------- | --------------- | ------------------------- +json.exception.type_error.301 | cannot create object from initializer list | To create an object from an initializer list, the initializer list must consist only of a list of pairs whose first element is a string. When this constraint is violated, an array is created instead. +json.exception.type_error.302 | type must be object, but is array | During implicit or explicit value conversion, the JSON type must be compatible to the target type. For instance, a JSON string can only be converted into string types, but not into numbers or boolean types. +json.exception.type_error.303 | incompatible ReferenceType for get_ref, actual type is object | To retrieve a reference to a value stored in a @ref basic_json object with @ref get_ref, the type of the reference must match the value type. For instance, for a JSON array, the @a ReferenceType must be @ref array_t &. +json.exception.type_error.304 | cannot use at() with string | The @ref at() member functions can only be executed for certain JSON types. +json.exception.type_error.305 | cannot use operator[] with string | The @ref operator[] member functions can only be executed for certain JSON types. +json.exception.type_error.306 | cannot use value() with string | The @ref value() member functions can only be executed for certain JSON types. +json.exception.type_error.307 | cannot use erase() with string | The @ref erase() member functions can only be executed for certain JSON types. +json.exception.type_error.308 | cannot use push_back() with string | The @ref push_back() and @ref operator+= member functions can only be executed for certain JSON types. +json.exception.type_error.309 | cannot use insert() with | The @ref insert() member functions can only be executed for certain JSON types. +json.exception.type_error.310 | cannot use swap() with number | The @ref swap() member functions can only be executed for certain JSON types. +json.exception.type_error.311 | cannot use emplace_back() with string | The @ref emplace_back() member function can only be executed for certain JSON types. +json.exception.type_error.312 | cannot use update() with string | The @ref update() member functions can only be executed for certain JSON types. 
+json.exception.type_error.313 | invalid value to unflatten | The @ref unflatten function converts an object whose keys are JSON Pointers back into an arbitrary nested JSON value. The JSON Pointers must not overlap, because then the resulting value would not be well defined. +json.exception.type_error.314 | only objects can be unflattened | The @ref unflatten function only works for an object whose keys are JSON Pointers. +json.exception.type_error.315 | values in object must be primitive | The @ref unflatten function only works for an object whose keys are JSON Pointers and whose values are primitive. +json.exception.type_error.316 | invalid UTF-8 byte at index 10: 0x7E | The @ref dump function only works with UTF-8 encoded strings; that is, if you assign a `std::string` to a JSON value, make sure it is UTF-8 encoded. | +json.exception.type_error.317 | JSON value cannot be serialized to requested format | The dynamic type of the object cannot be represented in the requested serialization format (e.g. a raw `true` or `null` JSON object cannot be serialized to BSON) | + +@liveexample{The following code shows how a `type_error` exception can be +caught.,type_error} + +@sa - @ref exception for the base class of the library exceptions +@sa - @ref parse_error for exceptions indicating a parse error +@sa - @ref invalid_iterator for exceptions indicating errors with iterators +@sa - @ref out_of_range for exceptions indicating access out of the defined range +@sa - @ref other_error for exceptions indicating other library errors + +@since version 3.0.0 +*/ +class type_error : public exception +{ + public: + static type_error create(int id_, const std::string& what_arg) + { + std::string w = exception::name("type_error", id_) + what_arg; + return type_error(id_, w.c_str()); + } + + private: + type_error(int id_, const char* what_arg) : exception(id_, what_arg) {} +}; + +/*! +@brief exception indicating access out of the defined range + +This exception is thrown in case a library function is called on an input +parameter that exceeds the expected range, for instance in case of array +indices or nonexisting object keys. + +Exceptions have ids 4xx. + +name / id | example message | description +------------------------------- | --------------- | ------------------------- +json.exception.out_of_range.401 | array index 3 is out of range | The provided array index @a i is larger than @a size-1. +json.exception.out_of_range.402 | array index '-' (3) is out of range | The special array index `-` in a JSON Pointer never describes a valid element of the array, but the index past the end. That is, it can only be used to add elements at this position, but not to read it. +json.exception.out_of_range.403 | key 'foo' not found | The provided key was not found in the JSON object. +json.exception.out_of_range.404 | unresolved reference token 'foo' | A reference token in a JSON Pointer could not be resolved. +json.exception.out_of_range.405 | JSON pointer has no parent | The JSON Patch operations 'remove' and 'add' can not be applied to the root element of the JSON value. +json.exception.out_of_range.406 | number overflow parsing '10E1000' | A parsed number could not be stored as without changing it to NaN or INF. +json.exception.out_of_range.407 | number overflow serializing '9223372036854775808' | UBJSON and BSON only support integer numbers up to 9223372036854775807. 
| +json.exception.out_of_range.408 | excessive array size: 8658170730974374167 | The size (following `#`) of an UBJSON array or object exceeds the maximal capacity. | +json.exception.out_of_range.409 | BSON key cannot contain code point U+0000 (at byte 2) | Key identifiers to be serialized to BSON cannot contain code point U+0000, since the key is stored as zero-terminated c-string | + +@liveexample{The following code shows how an `out_of_range` exception can be +caught.,out_of_range} + +@sa - @ref exception for the base class of the library exceptions +@sa - @ref parse_error for exceptions indicating a parse error +@sa - @ref invalid_iterator for exceptions indicating errors with iterators +@sa - @ref type_error for exceptions indicating executing a member function with + a wrong type +@sa - @ref other_error for exceptions indicating other library errors + +@since version 3.0.0 +*/ +class out_of_range : public exception +{ + public: + static out_of_range create(int id_, const std::string& what_arg) + { + std::string w = exception::name("out_of_range", id_) + what_arg; + return out_of_range(id_, w.c_str()); + } + + private: + out_of_range(int id_, const char* what_arg) : exception(id_, what_arg) {} +}; + +/*! +@brief exception indicating other library errors + +This exception is thrown in case of errors that cannot be classified with the +other exception types. + +Exceptions have ids 5xx. + +name / id | example message | description +------------------------------ | --------------- | ------------------------- +json.exception.other_error.501 | unsuccessful: {"op":"test","path":"/baz", "value":"bar"} | A JSON Patch operation 'test' failed. The unsuccessful operation is also printed. + +@sa - @ref exception for the base class of the library exceptions +@sa - @ref parse_error for exceptions indicating a parse error +@sa - @ref invalid_iterator for exceptions indicating errors with iterators +@sa - @ref type_error for exceptions indicating executing a member function with + a wrong type +@sa - @ref out_of_range for exceptions indicating access out of the defined range + +@liveexample{The following code shows how an `other_error` exception can be +caught.,other_error} + +@since version 3.0.0 +*/ +class other_error : public exception +{ + public: + static other_error create(int id_, const std::string& what_arg) + { + std::string w = exception::name("other_error", id_) + what_arg; + return other_error(id_, w.c_str()); + } + + private: + other_error(int id_, const char* what_arg) : exception(id_, what_arg) {} +}; +} // namespace detail } // namespace nlohmann -#endif - // #include +#include // pair + // This file contains all internal macro definitions // You MUST include macro_unscope.hpp at the end of json.hpp to undef all of them @@ -152,6 +490,19 @@ using json = basic_json<>; #define JSON_DEPRECATED #endif +// allow for portable nodiscard warnings +#if defined(__has_cpp_attribute) + #if __has_cpp_attribute(nodiscard) + #define JSON_NODISCARD [[nodiscard]] + #elif __has_cpp_attribute(gnu::warn_unused_result) + #define JSON_NODISCARD [[gnu::warn_unused_result]] + #else + #define JSON_NODISCARD + #endif +#else + #define JSON_NODISCARD +#endif + // allow to disable exceptions #if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)) && !defined(JSON_NOEXCEPTION) #define JSON_THROW(exception) throw exception @@ -159,6 +510,7 @@ using json = basic_json<>; #define JSON_CATCH(exception) catch(exception) #define JSON_INTERNAL_CATCH(exception) catch(exception) #else + #include 
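
For orientation, a minimal sketch of this default specialization in use once the full definition is available (illustrative only; `parse`, `operator[]`, `at` and `dump` are documented members of @ref basic_json):

    #include <nlohmann/json.hpp>

    int main()
    {
        // nlohmann::json is basic_json<> with std::map, std::vector, std::string,
        // bool, std::int64_t, std::uint64_t, double and std::allocator.
        nlohmann::json j = nlohmann::json::parse(R"({"pi": 3.141, "happy": true})");
        j["name"] = "value";                         // insert/overwrite via operator[]
        const double pi = j.at("pi").get<double>();  // checked access; may throw
        return (pi > 3.0 and j.dump(2).size() > 0) ? 0 : 1;  // dump with 2-space indent
    }
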
#define JSON_THROW(exception) std::abort() #define JSON_TRY if(true) #define JSON_CATCH(exception) if(false) @@ -187,8 +539,8 @@ using json = basic_json<>; // manual branch prediction #if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) - #define JSON_LIKELY(x) __builtin_expect(!!(x), 1) - #define JSON_UNLIKELY(x) __builtin_expect(!!(x), 0) + #define JSON_LIKELY(x) __builtin_expect(x, 1) + #define JSON_UNLIKELY(x) __builtin_expect(x, 0) #else #define JSON_LIKELY(x) x #define JSON_UNLIKELY(x) x @@ -322,8 +674,6 @@ constexpr T static_const::value; #include // false_type, is_constructible, is_integral, is_same, true_type #include // declval -// #include - // #include @@ -389,8 +739,10 @@ struct iterator_traits::value>> using pointer = T*; using reference = T&; }; -} -} +} // namespace detail +} // namespace nlohmann + +// #include // #include @@ -412,7 +764,9 @@ struct nonesuch nonesuch() = delete; ~nonesuch() = delete; nonesuch(nonesuch const&) = delete; + nonesuch(nonesuch const&&) = delete; void operator=(nonesuch const&) = delete; + void operator=(nonesuch&&) = delete; }; template +// #include +#ifndef INCLUDE_NLOHMANN_JSON_FWD_HPP_ +#define INCLUDE_NLOHMANN_JSON_FWD_HPP_ + +#include // int64_t, uint64_t +#include // map +#include // allocator +#include // string +#include // vector + +/*! +@brief namespace for Niels Lohmann +@see https://github.com/nlohmann +@since version 1.0.0 +*/ +namespace nlohmann +{ +/*! +@brief default JSONSerializer template argument + +This serializer ignores the template arguments and uses ADL +([argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl)) +for serialization. +*/ +template +struct adl_serializer; + +template class ObjectType = + std::map, + template class ArrayType = std::vector, + class StringType = std::string, class BooleanType = bool, + class NumberIntegerType = std::int64_t, + class NumberUnsignedType = std::uint64_t, + class NumberFloatType = double, + template class AllocatorType = std::allocator, + template class JSONSerializer = + adl_serializer> +class basic_json; + +/*! +@brief JSON Pointer + +A JSON pointer defines a string syntax for identifying a specific value +within a JSON document. It can be used with functions `at` and +`operator[]`. Furthermore, JSON pointers are the base for JSON patches. + +@sa [RFC 6901](https://tools.ietf.org/html/rfc6901) + +@since version 2.0.0 +*/ +template +class json_pointer; + +/*! +@brief default JSON class + +This type is the default specialization of the @ref basic_json class which +uses the standard template types. 
+ +@since version 1.0.0 +*/ +using json = basic_json<>; +} // namespace nlohmann + +#endif // INCLUDE_NLOHMANN_JSON_FWD_HPP_ namespace nlohmann @@ -792,385 +1210,6 @@ struct is_compatible_type } // namespace detail } // namespace nlohmann -// #include - - -#include // exception -#include // runtime_error -#include // to_string - -// #include - - -#include // size_t - -namespace nlohmann -{ -namespace detail -{ -/// struct to capture the start position of the current token -struct position_t -{ - /// the total number of characters read - std::size_t chars_read_total = 0; - /// the number of characters read in the current line - std::size_t chars_read_current_line = 0; - /// the number of lines read - std::size_t lines_read = 0; - - /// conversion to size_t to preserve SAX interface - constexpr operator size_t() const - { - return chars_read_total; - } -}; - -} -} - - -namespace nlohmann -{ -namespace detail -{ -//////////////// -// exceptions // -//////////////// - -/*! -@brief general exception of the @ref basic_json class - -This class is an extension of `std::exception` objects with a member @a id for -exception ids. It is used as the base class for all exceptions thrown by the -@ref basic_json class. This class can hence be used as "wildcard" to catch -exceptions. - -Subclasses: -- @ref parse_error for exceptions indicating a parse error -- @ref invalid_iterator for exceptions indicating errors with iterators -- @ref type_error for exceptions indicating executing a member function with - a wrong type -- @ref out_of_range for exceptions indicating access out of the defined range -- @ref other_error for exceptions indicating other library errors - -@internal -@note To have nothrow-copy-constructible exceptions, we internally use - `std::runtime_error` which can cope with arbitrary-length error messages. - Intermediate strings are built with static functions and then passed to - the actual constructor. -@endinternal - -@liveexample{The following code shows how arbitrary library exceptions can be -caught.,exception} - -@since version 3.0.0 -*/ -class exception : public std::exception -{ - public: - /// returns the explanatory string - const char* what() const noexcept override - { - return m.what(); - } - - /// the id of the exception - const int id; - - protected: - exception(int id_, const char* what_arg) : id(id_), m(what_arg) {} - - static std::string name(const std::string& ename, int id_) - { - return "[json.exception." + ename + "." + std::to_string(id_) + "] "; - } - - private: - /// an exception object as storage for error messages - std::runtime_error m; -}; - -/*! -@brief exception indicating a parse error - -This exception is thrown by the library when a parse error occurs. Parse errors -can occur during the deserialization of JSON text, CBOR, MessagePack, as well -as when using JSON Patch. - -Member @a byte holds the byte index of the last read character in the input -file. - -Exceptions have ids 1xx. - -name / id | example message | description ------------------------------- | --------------- | ------------------------- -json.exception.parse_error.101 | parse error at 2: unexpected end of input; expected string literal | This error indicates a syntax error while deserializing a JSON text. The error message describes that an unexpected token (character) was encountered, and the member @a byte indicates the error position. -json.exception.parse_error.102 | parse error at 14: missing or wrong low surrogate | JSON uses the `\uxxxx` format to describe Unicode characters. 
Code points above above 0xFFFF are split into two `\uxxxx` entries ("surrogate pairs"). This error indicates that the surrogate pair is incomplete or contains an invalid code point. -json.exception.parse_error.103 | parse error: code points above 0x10FFFF are invalid | Unicode supports code points up to 0x10FFFF. Code points above 0x10FFFF are invalid. -json.exception.parse_error.104 | parse error: JSON patch must be an array of objects | [RFC 6902](https://tools.ietf.org/html/rfc6902) requires a JSON Patch document to be a JSON document that represents an array of objects. -json.exception.parse_error.105 | parse error: operation must have string member 'op' | An operation of a JSON Patch document must contain exactly one "op" member, whose value indicates the operation to perform. Its value must be one of "add", "remove", "replace", "move", "copy", or "test"; other values are errors. -json.exception.parse_error.106 | parse error: array index '01' must not begin with '0' | An array index in a JSON Pointer ([RFC 6901](https://tools.ietf.org/html/rfc6901)) may be `0` or any number without a leading `0`. -json.exception.parse_error.107 | parse error: JSON pointer must be empty or begin with '/' - was: 'foo' | A JSON Pointer must be a Unicode string containing a sequence of zero or more reference tokens, each prefixed by a `/` character. -json.exception.parse_error.108 | parse error: escape character '~' must be followed with '0' or '1' | In a JSON Pointer, only `~0` and `~1` are valid escape sequences. -json.exception.parse_error.109 | parse error: array index 'one' is not a number | A JSON Pointer array index must be a number. -json.exception.parse_error.110 | parse error at 1: cannot read 2 bytes from vector | When parsing CBOR or MessagePack, the byte vector ends before the complete value has been read. -json.exception.parse_error.112 | parse error at 1: error reading CBOR; last byte: 0xF8 | Not all types of CBOR or MessagePack are supported. This exception occurs if an unsupported byte was read. -json.exception.parse_error.113 | parse error at 2: expected a CBOR string; last byte: 0x98 | While parsing a map key, a value that is not a string has been read. -json.exception.parse_error.114 | parse error: Unsupported BSON record type 0x0F | The parsing of the corresponding BSON record type is not implemented (yet). - -@note For an input with n bytes, 1 is the index of the first character and n+1 - is the index of the terminating null byte or the end of file. This also - holds true when reading a byte vector (CBOR or MessagePack). - -@liveexample{The following code shows how a `parse_error` exception can be -caught.,parse_error} - -@sa @ref exception for the base class of the library exceptions -@sa @ref invalid_iterator for exceptions indicating errors with iterators -@sa @ref type_error for exceptions indicating executing a member function with - a wrong type -@sa @ref out_of_range for exceptions indicating access out of the defined range -@sa @ref other_error for exceptions indicating other library errors - -@since version 3.0.0 -*/ -class parse_error : public exception -{ - public: - /*! 
- @brief create a parse error exception - @param[in] id_ the id of the exception - @param[in] position the position where the error occurred (or with - chars_read_total=0 if the position cannot be - determined) - @param[in] what_arg the explanatory string - @return parse_error object - */ - static parse_error create(int id_, const position_t& pos, const std::string& what_arg) - { - std::string w = exception::name("parse_error", id_) + "parse error" + - position_string(pos) + ": " + what_arg; - return parse_error(id_, pos.chars_read_total, w.c_str()); - } - - static parse_error create(int id_, std::size_t byte_, const std::string& what_arg) - { - std::string w = exception::name("parse_error", id_) + "parse error" + - (byte_ != 0 ? (" at byte " + std::to_string(byte_)) : "") + - ": " + what_arg; - return parse_error(id_, byte_, w.c_str()); - } - - /*! - @brief byte index of the parse error - - The byte index of the last read character in the input file. - - @note For an input with n bytes, 1 is the index of the first character and - n+1 is the index of the terminating null byte or the end of file. - This also holds true when reading a byte vector (CBOR or MessagePack). - */ - const std::size_t byte; - - private: - parse_error(int id_, std::size_t byte_, const char* what_arg) - : exception(id_, what_arg), byte(byte_) {} - - static std::string position_string(const position_t& pos) - { - return " at line " + std::to_string(pos.lines_read + 1) + - ", column " + std::to_string(pos.chars_read_current_line); - } -}; - -/*! -@brief exception indicating errors with iterators - -This exception is thrown if iterators passed to a library function do not match -the expected semantics. - -Exceptions have ids 2xx. - -name / id | example message | description ------------------------------------ | --------------- | ------------------------- -json.exception.invalid_iterator.201 | iterators are not compatible | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid. -json.exception.invalid_iterator.202 | iterator does not fit current value | In an erase or insert function, the passed iterator @a pos does not belong to the JSON value for which the function was called. It hence does not define a valid position for the deletion/insertion. -json.exception.invalid_iterator.203 | iterators do not fit current value | Either iterator passed to function @ref erase(IteratorType first, IteratorType last) does not belong to the JSON value from which values shall be erased. It hence does not define a valid range to delete values from. -json.exception.invalid_iterator.204 | iterators out of range | When an iterator range for a primitive type (number, boolean, or string) is passed to a constructor or an erase function, this range has to be exactly (@ref begin(), @ref end()), because this is the only way the single stored value is expressed. All other ranges are invalid. -json.exception.invalid_iterator.205 | iterator out of range | When an iterator for a primitive type (number, boolean, or string) is passed to an erase function, the iterator has to be the @ref begin() iterator, because it is the only way to address the stored value. All other iterators are invalid. 
-json.exception.invalid_iterator.206 | cannot construct with iterators from null | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) belong to a JSON null value and hence to not define a valid range. -json.exception.invalid_iterator.207 | cannot use key() for non-object iterators | The key() member function can only be used on iterators belonging to a JSON object, because other types do not have a concept of a key. -json.exception.invalid_iterator.208 | cannot use operator[] for object iterators | The operator[] to specify a concrete offset cannot be used on iterators belonging to a JSON object, because JSON objects are unordered. -json.exception.invalid_iterator.209 | cannot use offsets with object iterators | The offset operators (+, -, +=, -=) cannot be used on iterators belonging to a JSON object, because JSON objects are unordered. -json.exception.invalid_iterator.210 | iterators do not fit | The iterator range passed to the insert function are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid. -json.exception.invalid_iterator.211 | passed iterators may not belong to container | The iterator range passed to the insert function must not be a subrange of the container to insert to. -json.exception.invalid_iterator.212 | cannot compare iterators of different containers | When two iterators are compared, they must belong to the same container. -json.exception.invalid_iterator.213 | cannot compare order of object iterators | The order of object iterators cannot be compared, because JSON objects are unordered. -json.exception.invalid_iterator.214 | cannot get value | Cannot get value for iterator: Either the iterator belongs to a null value or it is an iterator to a primitive type (number, boolean, or string), but the iterator is different to @ref begin(). - -@liveexample{The following code shows how an `invalid_iterator` exception can be -caught.,invalid_iterator} - -@sa @ref exception for the base class of the library exceptions -@sa @ref parse_error for exceptions indicating a parse error -@sa @ref type_error for exceptions indicating executing a member function with - a wrong type -@sa @ref out_of_range for exceptions indicating access out of the defined range -@sa @ref other_error for exceptions indicating other library errors - -@since version 3.0.0 -*/ -class invalid_iterator : public exception -{ - public: - static invalid_iterator create(int id_, const std::string& what_arg) - { - std::string w = exception::name("invalid_iterator", id_) + what_arg; - return invalid_iterator(id_, w.c_str()); - } - - private: - invalid_iterator(int id_, const char* what_arg) - : exception(id_, what_arg) {} -}; - -/*! -@brief exception indicating executing a member function with a wrong type - -This exception is thrown in case of a type error; that is, a library function is -executed on a JSON value whose type does not match the expected semantics. - -Exceptions have ids 3xx. - -name / id | example message | description ------------------------------ | --------------- | ------------------------- -json.exception.type_error.301 | cannot create object from initializer list | To create an object from an initializer list, the initializer list must consist only of a list of pairs whose first element is a string. When this constraint is violated, an array is created instead. 
-json.exception.type_error.302 | type must be object, but is array | During implicit or explicit value conversion, the JSON type must be compatible to the target type. For instance, a JSON string can only be converted into string types, but not into numbers or boolean types. -json.exception.type_error.303 | incompatible ReferenceType for get_ref, actual type is object | To retrieve a reference to a value stored in a @ref basic_json object with @ref get_ref, the type of the reference must match the value type. For instance, for a JSON array, the @a ReferenceType must be @ref array_t&. -json.exception.type_error.304 | cannot use at() with string | The @ref at() member functions can only be executed for certain JSON types. -json.exception.type_error.305 | cannot use operator[] with string | The @ref operator[] member functions can only be executed for certain JSON types. -json.exception.type_error.306 | cannot use value() with string | The @ref value() member functions can only be executed for certain JSON types. -json.exception.type_error.307 | cannot use erase() with string | The @ref erase() member functions can only be executed for certain JSON types. -json.exception.type_error.308 | cannot use push_back() with string | The @ref push_back() and @ref operator+= member functions can only be executed for certain JSON types. -json.exception.type_error.309 | cannot use insert() with | The @ref insert() member functions can only be executed for certain JSON types. -json.exception.type_error.310 | cannot use swap() with number | The @ref swap() member functions can only be executed for certain JSON types. -json.exception.type_error.311 | cannot use emplace_back() with string | The @ref emplace_back() member function can only be executed for certain JSON types. -json.exception.type_error.312 | cannot use update() with string | The @ref update() member functions can only be executed for certain JSON types. -json.exception.type_error.313 | invalid value to unflatten | The @ref unflatten function converts an object whose keys are JSON Pointers back into an arbitrary nested JSON value. The JSON Pointers must not overlap, because then the resulting value would not be well defined. -json.exception.type_error.314 | only objects can be unflattened | The @ref unflatten function only works for an object whose keys are JSON Pointers. -json.exception.type_error.315 | values in object must be primitive | The @ref unflatten function only works for an object whose keys are JSON Pointers and whose values are primitive. -json.exception.type_error.316 | invalid UTF-8 byte at index 10: 0x7E | The @ref dump function only works with UTF-8 encoded strings; that is, if you assign a `std::string` to a JSON value, make sure it is UTF-8 encoded. | -json.exception.type_error.317 | JSON value cannot be serialized to requested format | The dynamic type of the object cannot be represented in the requested serialization format (e.g. 
a raw `true` or `null` JSON object cannot be serialized to BSON) | - -@liveexample{The following code shows how a `type_error` exception can be -caught.,type_error} - -@sa @ref exception for the base class of the library exceptions -@sa @ref parse_error for exceptions indicating a parse error -@sa @ref invalid_iterator for exceptions indicating errors with iterators -@sa @ref out_of_range for exceptions indicating access out of the defined range -@sa @ref other_error for exceptions indicating other library errors - -@since version 3.0.0 -*/ -class type_error : public exception -{ - public: - static type_error create(int id_, const std::string& what_arg) - { - std::string w = exception::name("type_error", id_) + what_arg; - return type_error(id_, w.c_str()); - } - - private: - type_error(int id_, const char* what_arg) : exception(id_, what_arg) {} -}; - -/*! -@brief exception indicating access out of the defined range - -This exception is thrown in case a library function is called on an input -parameter that exceeds the expected range, for instance in case of array -indices or nonexisting object keys. - -Exceptions have ids 4xx. - -name / id | example message | description -------------------------------- | --------------- | ------------------------- -json.exception.out_of_range.401 | array index 3 is out of range | The provided array index @a i is larger than @a size-1. -json.exception.out_of_range.402 | array index '-' (3) is out of range | The special array index `-` in a JSON Pointer never describes a valid element of the array, but the index past the end. That is, it can only be used to add elements at this position, but not to read it. -json.exception.out_of_range.403 | key 'foo' not found | The provided key was not found in the JSON object. -json.exception.out_of_range.404 | unresolved reference token 'foo' | A reference token in a JSON Pointer could not be resolved. -json.exception.out_of_range.405 | JSON pointer has no parent | The JSON Patch operations 'remove' and 'add' can not be applied to the root element of the JSON value. -json.exception.out_of_range.406 | number overflow parsing '10E1000' | A parsed number could not be stored as without changing it to NaN or INF. -json.exception.out_of_range.407 | number overflow serializing '9223372036854775808' | UBJSON and BSON only support integer numbers up to 9223372036854775807. | -json.exception.out_of_range.408 | excessive array size: 8658170730974374167 | The size (following `#`) of an UBJSON array or object exceeds the maximal capacity. 
| -json.exception.out_of_range.409 | BSON key cannot contain code point U+0000 (at byte 2) | Key identifiers to be serialized to BSON cannot contain code point U+0000, since the key is stored as zero-terminated c-string | - -@liveexample{The following code shows how an `out_of_range` exception can be -caught.,out_of_range} - -@sa @ref exception for the base class of the library exceptions -@sa @ref parse_error for exceptions indicating a parse error -@sa @ref invalid_iterator for exceptions indicating errors with iterators -@sa @ref type_error for exceptions indicating executing a member function with - a wrong type -@sa @ref other_error for exceptions indicating other library errors - -@since version 3.0.0 -*/ -class out_of_range : public exception -{ - public: - static out_of_range create(int id_, const std::string& what_arg) - { - std::string w = exception::name("out_of_range", id_) + what_arg; - return out_of_range(id_, w.c_str()); - } - - private: - out_of_range(int id_, const char* what_arg) : exception(id_, what_arg) {} -}; - -/*! -@brief exception indicating other library errors - -This exception is thrown in case of errors that cannot be classified with the -other exception types. - -Exceptions have ids 5xx. - -name / id | example message | description ------------------------------- | --------------- | ------------------------- -json.exception.other_error.501 | unsuccessful: {"op":"test","path":"/baz", "value":"bar"} | A JSON Patch operation 'test' failed. The unsuccessful operation is also printed. - -@sa @ref exception for the base class of the library exceptions -@sa @ref parse_error for exceptions indicating a parse error -@sa @ref invalid_iterator for exceptions indicating errors with iterators -@sa @ref type_error for exceptions indicating executing a member function with - a wrong type -@sa @ref out_of_range for exceptions indicating access out of the defined range - -@liveexample{The following code shows how an `other_error` exception can be -caught.,other_error} - -@since version 3.0.0 -*/ -class other_error : public exception -{ - public: - static other_error create(int id_, const std::string& what_arg) - { - std::string w = exception::name("other_error", id_) + what_arg; - return other_error(id_, w.c_str()); - } - - private: - other_error(int id_, const char* what_arg) : exception(id_, what_arg) {} -}; -} // namespace detail -} // namespace nlohmann - // #include @@ -1178,6 +1217,7 @@ class other_error : public exception #include // and #include // size_t #include // uint8_t +#include // string namespace nlohmann { @@ -1249,32 +1289,6 @@ inline bool operator<(const value_t lhs, const value_t rhs) noexcept } // namespace detail } // namespace nlohmann -// #include - - -#include // transform -#include // array -#include // and, not -#include // forward_list -#include // inserter, front_inserter, end -#include // map -#include // string -#include // tuple, make_tuple -#include // is_arithmetic, is_same, is_enum, underlying_type, is_convertible -#include // unordered_map -#include // pair, declval -#include // valarray - -// #include - -// #include - -// #include - -// #include - -// #include - namespace nlohmann { @@ -1629,32 +1643,28 @@ constexpr const auto& from_json = detail::static_const::va // #include +#include // copy #include // or, and, not #include // begin, end +#include // string #include // tuple, get #include // is_same, is_constructible, is_floating_point, is_enum, underlying_type #include // move, forward, declval, pair #include // valarray #include // 
vector -// #include - -// #include - -// #include - // #include #include // size_t -#include // string, to_string #include // input_iterator_tag +#include // string, to_string #include // tuple_size, get, tuple_element -// #include - // #include +// #include + namespace nlohmann { @@ -1700,13 +1710,13 @@ template class iteration_proxy_value } /// equality operator (needed for InputIterator) - bool operator==(const iteration_proxy_value& o) const noexcept + bool operator==(const iteration_proxy_value& o) const { return anchor == o.anchor; } /// inequality operator (needed for range-based for) - bool operator!=(const iteration_proxy_value& o) const noexcept + bool operator!=(const iteration_proxy_value& o) const { return anchor != o.anchor; } @@ -1795,6 +1805,11 @@ auto get(const nlohmann::detail::iteration_proxy_value& i) -> decl // And see https://github.com/nlohmann/json/pull/1391 namespace std { +#if defined(__clang__) + // Fix: https://github.com/nlohmann/json/issues/1401 + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wmismatched-tags" +#endif template class tuple_size<::nlohmann::detail::iteration_proxy_value> : public std::integral_constant {}; @@ -1807,7 +1822,17 @@ class tuple_element> get(std::declval < ::nlohmann::detail::iteration_proxy_value> ())); }; -} +#if defined(__clang__) + #pragma clang diagnostic pop +#endif +} // namespace std + +// #include + +// #include + +// #include + namespace nlohmann { @@ -2137,11 +2162,81 @@ constexpr const auto& to_json = detail::static_const::value; } // namespace } // namespace nlohmann + +namespace nlohmann +{ + +template +struct adl_serializer +{ + /*! + @brief convert a JSON value to any value type + + This function is usually called by the `get()` function of the + @ref basic_json class (either explicit or via conversion operators). + + @param[in] j JSON value to read from + @param[in,out] val value to write to + */ + template + static auto from_json(BasicJsonType&& j, ValueType& val) noexcept( + noexcept(::nlohmann::from_json(std::forward(j), val))) + -> decltype(::nlohmann::from_json(std::forward(j), val), void()) + { + ::nlohmann::from_json(std::forward(j), val); + } + + /*! + @brief convert any value type to a JSON value + + This function is usually called by the constructors of the @ref basic_json + class. 
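
As an aside, the customization point this serializer relies on can be shown with a small sketch (the `demo` namespace and `person` type are hypothetical, used only for illustration; the free `to_json`/`from_json` overloads are found via ADL exactly as described above):

    #include <nlohmann/json.hpp>
    #include <string>

    namespace demo  // hypothetical user namespace
    {
        struct person
        {
            std::string name;
            int age;
        };

        // Picked up by adl_serializer through argument-dependent lookup.
        void to_json(nlohmann::json& j, const person& p)
        {
            j = nlohmann::json{{"name", p.name}, {"age", p.age}};
        }

        void from_json(const nlohmann::json& j, person& p)
        {
            j.at("name").get_to(p.name);
            j.at("age").get_to(p.age);
        }
    } // namespace demo

    int main()
    {
        const demo::person alice{"Alice", 42};
        const nlohmann::json j = alice;           // uses demo::to_json
        const auto copy = j.get<demo::person>();  // uses demo::from_json
        return copy.age == alice.age ? 0 : 1;
    }
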
+ + @param[in,out] j JSON value to write to + @param[in] val value to read from + */ + template + static auto to_json(BasicJsonType& j, ValueType&& val) noexcept( + noexcept(::nlohmann::to_json(j, std::forward(val)))) + -> decltype(::nlohmann::to_json(j, std::forward(val)), void()) + { + ::nlohmann::to_json(j, std::forward(val)); + } +}; + +} // namespace nlohmann + +// #include + +// #include + +// #include + +// #include + + +#include // generate_n +#include // array +#include // assert +#include // ldexp +#include // size_t +#include // uint8_t, uint16_t, uint32_t, uint64_t +#include // snprintf +#include // memcpy +#include // back_inserter +#include // numeric_limits +#include // char_traits, string +#include // make_pair, move + +// #include + // #include +#include // array #include // assert #include // size_t +#include //FILE * #include // strlen #include // istream #include // begin, end, iterator_traits, random_access_iterator_tag, distance, next @@ -2150,7 +2245,8 @@ constexpr const auto& to_json = detail::static_const::value; #include // string, char_traits #include // enable_if, is_base_of, is_pointer, is_integral, remove_pointer #include // pair, declval -#include //FILE * + +// #include // #include @@ -2198,10 +2294,18 @@ class file_input_adapter : public input_adapter_protocol : m_file(f) {} + // make class move-only + file_input_adapter(const file_input_adapter&) = delete; + file_input_adapter(file_input_adapter&&) = default; + file_input_adapter& operator=(const file_input_adapter&) = delete; + file_input_adapter& operator=(file_input_adapter&&) = default; + ~file_input_adapter() override = default; + std::char_traits::int_type get_character() noexcept override { return std::fgetc(m_file); } + private: /// the file pointer to read from std::FILE* m_file; @@ -2293,7 +2397,11 @@ template struct wide_string_input_helper { // UTF-32 - static void fill_buffer(const WideStringType& str, size_t& current_wchar, std::array::int_type, 4>& utf8_bytes, size_t& utf8_bytes_index, size_t& utf8_bytes_filled) + static void fill_buffer(const WideStringType& str, + size_t& current_wchar, + std::array::int_type, 4>& utf8_bytes, + size_t& utf8_bytes_index, + size_t& utf8_bytes_filled) { utf8_bytes_index = 0; @@ -2305,39 +2413,39 @@ struct wide_string_input_helper else { // get the current character - const auto wc = static_cast(str[current_wchar++]); + const auto wc = static_cast(str[current_wchar++]); // UTF-32 to UTF-8 encoding if (wc < 0x80) { - utf8_bytes[0] = wc; + utf8_bytes[0] = static_cast::int_type>(wc); utf8_bytes_filled = 1; } else if (wc <= 0x7FF) { - utf8_bytes[0] = 0xC0 | ((wc >> 6) & 0x1F); - utf8_bytes[1] = 0x80 | (wc & 0x3F); + utf8_bytes[0] = static_cast::int_type>(0xC0u | ((wc >> 6u) & 0x1Fu)); + utf8_bytes[1] = static_cast::int_type>(0x80u | (wc & 0x3Fu)); utf8_bytes_filled = 2; } else if (wc <= 0xFFFF) { - utf8_bytes[0] = 0xE0 | ((wc >> 12) & 0x0F); - utf8_bytes[1] = 0x80 | ((wc >> 6) & 0x3F); - utf8_bytes[2] = 0x80 | (wc & 0x3F); + utf8_bytes[0] = static_cast::int_type>(0xE0u | ((wc >> 12u) & 0x0Fu)); + utf8_bytes[1] = static_cast::int_type>(0x80u | ((wc >> 6u) & 0x3Fu)); + utf8_bytes[2] = static_cast::int_type>(0x80u | (wc & 0x3Fu)); utf8_bytes_filled = 3; } else if (wc <= 0x10FFFF) { - utf8_bytes[0] = 0xF0 | ((wc >> 18) & 0x07); - utf8_bytes[1] = 0x80 | ((wc >> 12) & 0x3F); - utf8_bytes[2] = 0x80 | ((wc >> 6) & 0x3F); - utf8_bytes[3] = 0x80 | (wc & 0x3F); + utf8_bytes[0] = static_cast::int_type>(0xF0u | ((wc >> 18u) & 0x07u)); + utf8_bytes[1] = 
static_cast::int_type>(0x80u | ((wc >> 12u) & 0x3Fu)); + utf8_bytes[2] = static_cast::int_type>(0x80u | ((wc >> 6u) & 0x3Fu)); + utf8_bytes[3] = static_cast::int_type>(0x80u | (wc & 0x3Fu)); utf8_bytes_filled = 4; } else { // unknown character - utf8_bytes[0] = wc; + utf8_bytes[0] = static_cast::int_type>(wc); utf8_bytes_filled = 1; } } @@ -2348,7 +2456,11 @@ template struct wide_string_input_helper { // UTF-16 - static void fill_buffer(const WideStringType& str, size_t& current_wchar, std::array::int_type, 4>& utf8_bytes, size_t& utf8_bytes_index, size_t& utf8_bytes_filled) + static void fill_buffer(const WideStringType& str, + size_t& current_wchar, + std::array::int_type, 4>& utf8_bytes, + size_t& utf8_bytes_index, + size_t& utf8_bytes_filled) { utf8_bytes_index = 0; @@ -2360,44 +2472,44 @@ struct wide_string_input_helper else { // get the current character - const auto wc = static_cast(str[current_wchar++]); + const auto wc = static_cast(str[current_wchar++]); // UTF-16 to UTF-8 encoding if (wc < 0x80) { - utf8_bytes[0] = wc; + utf8_bytes[0] = static_cast::int_type>(wc); utf8_bytes_filled = 1; } else if (wc <= 0x7FF) { - utf8_bytes[0] = 0xC0 | ((wc >> 6)); - utf8_bytes[1] = 0x80 | (wc & 0x3F); + utf8_bytes[0] = static_cast::int_type>(0xC0u | ((wc >> 6u))); + utf8_bytes[1] = static_cast::int_type>(0x80u | (wc & 0x3Fu)); utf8_bytes_filled = 2; } else if (0xD800 > wc or wc >= 0xE000) { - utf8_bytes[0] = 0xE0 | ((wc >> 12)); - utf8_bytes[1] = 0x80 | ((wc >> 6) & 0x3F); - utf8_bytes[2] = 0x80 | (wc & 0x3F); + utf8_bytes[0] = static_cast::int_type>(0xE0u | ((wc >> 12u))); + utf8_bytes[1] = static_cast::int_type>(0x80u | ((wc >> 6u) & 0x3Fu)); + utf8_bytes[2] = static_cast::int_type>(0x80u | (wc & 0x3Fu)); utf8_bytes_filled = 3; } else { if (current_wchar < str.size()) { - const auto wc2 = static_cast(str[current_wchar++]); - const int charcode = 0x10000 + (((wc & 0x3FF) << 10) | (wc2 & 0x3FF)); - utf8_bytes[0] = 0xf0 | (charcode >> 18); - utf8_bytes[1] = 0x80 | ((charcode >> 12) & 0x3F); - utf8_bytes[2] = 0x80 | ((charcode >> 6) & 0x3F); - utf8_bytes[3] = 0x80 | (charcode & 0x3F); + const auto wc2 = static_cast(str[current_wchar++]); + const auto charcode = 0x10000u + (((wc & 0x3FFu) << 10u) | (wc2 & 0x3FFu)); + utf8_bytes[0] = static_cast::int_type>(0xF0u | (charcode >> 18u)); + utf8_bytes[1] = static_cast::int_type>(0x80u | ((charcode >> 12u) & 0x3Fu)); + utf8_bytes[2] = static_cast::int_type>(0x80u | ((charcode >> 6u) & 0x3Fu)); + utf8_bytes[3] = static_cast::int_type>(0x80u | (charcode & 0x3Fu)); utf8_bytes_filled = 4; } else { // unknown character ++current_wchar; - utf8_bytes[0] = wc; + utf8_bytes[0] = static_cast::int_type>(wc); utf8_bytes_filled = 1; } } @@ -2409,7 +2521,7 @@ template class wide_string_input_adapter : public input_adapter_protocol { public: - explicit wide_string_input_adapter(const WideStringType& w) noexcept + explicit wide_string_input_adapter(const WideStringType& w) noexcept : str(w) {} @@ -2561,23 +2673,2847 @@ class input_adapter } // namespace detail } // namespace nlohmann +// #include + + +#include // assert +#include +#include // string +#include // move +#include // vector + +// #include + +// #include + + +namespace nlohmann +{ + +/*! +@brief SAX interface + +This class describes the SAX interface used by @ref nlohmann::json::sax_parse. +Each function is called in different situations while the input is parsed. The +boolean return value informs the parser whether to continue processing the +input. 
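
To make the contract concrete, a sketch of a handler derived from this interface and fed through `sax_parse` (`counting_sax` is a hypothetical name; `nlohmann::json::sax_parse` is the documented entry point):

    #include <nlohmann/json.hpp>
    #include <cstddef>
    #include <string>

    // Hypothetical handler: counts events and lets the parser run to completion.
    struct counting_sax : nlohmann::json_sax<nlohmann::json>
    {
        std::size_t events = 0;

        bool null() override { ++events; return true; }
        bool boolean(bool) override { ++events; return true; }
        bool number_integer(number_integer_t) override { ++events; return true; }
        bool number_unsigned(number_unsigned_t) override { ++events; return true; }
        bool number_float(number_float_t, const string_t&) override { ++events; return true; }
        bool string(string_t&) override { ++events; return true; }
        bool start_object(std::size_t) override { ++events; return true; }
        bool key(string_t&) override { ++events; return true; }
        bool end_object() override { ++events; return true; }
        bool start_array(std::size_t) override { ++events; return true; }
        bool end_array() override { ++events; return true; }
        bool parse_error(std::size_t, const std::string&,
                         const nlohmann::detail::exception&) override { return false; }
    };

    int main()
    {
        counting_sax sax;
        const bool ok = nlohmann::json::sax_parse(R"({"a": [1, 2, 3]})", &sax);
        // 8 events: start_object, key, start_array, three numbers, end_array, end_object
        return (ok and sax.events == 8) ? 0 : 1;
    }
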
+*/ +template +struct json_sax +{ + /// type for (signed) integers + using number_integer_t = typename BasicJsonType::number_integer_t; + /// type for unsigned integers + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + /// type for floating-point numbers + using number_float_t = typename BasicJsonType::number_float_t; + /// type for strings + using string_t = typename BasicJsonType::string_t; + + /*! + @brief a null value was read + @return whether parsing should proceed + */ + virtual bool null() = 0; + + /*! + @brief a boolean value was read + @param[in] val boolean value + @return whether parsing should proceed + */ + virtual bool boolean(bool val) = 0; + + /*! + @brief an integer number was read + @param[in] val integer value + @return whether parsing should proceed + */ + virtual bool number_integer(number_integer_t val) = 0; + + /*! + @brief an unsigned integer number was read + @param[in] val unsigned integer value + @return whether parsing should proceed + */ + virtual bool number_unsigned(number_unsigned_t val) = 0; + + /*! + @brief an floating-point number was read + @param[in] val floating-point value + @param[in] s raw token value + @return whether parsing should proceed + */ + virtual bool number_float(number_float_t val, const string_t& s) = 0; + + /*! + @brief a string was read + @param[in] val string value + @return whether parsing should proceed + @note It is safe to move the passed string. + */ + virtual bool string(string_t& val) = 0; + + /*! + @brief the beginning of an object was read + @param[in] elements number of object elements or -1 if unknown + @return whether parsing should proceed + @note binary formats may report the number of elements + */ + virtual bool start_object(std::size_t elements) = 0; + + /*! + @brief an object key was read + @param[in] val object key + @return whether parsing should proceed + @note It is safe to move the passed string. + */ + virtual bool key(string_t& val) = 0; + + /*! + @brief the end of an object was read + @return whether parsing should proceed + */ + virtual bool end_object() = 0; + + /*! + @brief the beginning of an array was read + @param[in] elements number of array elements or -1 if unknown + @return whether parsing should proceed + @note binary formats may report the number of elements + */ + virtual bool start_array(std::size_t elements) = 0; + + /*! + @brief the end of an array was read + @return whether parsing should proceed + */ + virtual bool end_array() = 0; + + /*! + @brief a parse error occurred + @param[in] position the position in the input where the error occurs + @param[in] last_token the last read token + @param[in] ex an exception object describing the error + @return whether parsing should proceed (must return false) + */ + virtual bool parse_error(std::size_t position, + const std::string& last_token, + const detail::exception& ex) = 0; + + virtual ~json_sax() = default; +}; + + +namespace detail +{ +/*! +@brief SAX implementation to create a JSON value from SAX events + +This class implements the @ref json_sax interface and processes the SAX events +to create a JSON value which makes it basically a DOM parser. The structure or +hierarchy of the JSON value is managed by the stack `ref_stack` which contains +a pointer to the respective array or object for each recursion depth. + +After successful parsing, the value that is passed by reference to the +constructor contains the parsed value. 
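
For orientation, roughly how this class is driven (a sketch only; json_sax_dom_parser lives in the detail namespace and is normally used indirectly through `parse()` rather than instantiated by callers):

    #include <nlohmann/json.hpp>

    int main()
    {
        nlohmann::json result;

        // The DOM parser translates SAX events into 'result' in place.
        nlohmann::detail::json_sax_dom_parser<nlohmann::json> handler(result, /*allow_exceptions=*/true);
        const bool ok = nlohmann::json::sax_parse(R"([1, {"a": true}])", &handler);

        return (ok and result.is_array() and result.size() == 2) ? 0 : 1;
    }
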
+ +@tparam BasicJsonType the JSON type +*/ +template +class json_sax_dom_parser +{ + public: + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + + /*! + @param[in, out] r reference to a JSON value that is manipulated while + parsing + @param[in] allow_exceptions_ whether parse errors yield exceptions + */ + explicit json_sax_dom_parser(BasicJsonType& r, const bool allow_exceptions_ = true) + : root(r), allow_exceptions(allow_exceptions_) + {} + + // make class move-only + json_sax_dom_parser(const json_sax_dom_parser&) = delete; + json_sax_dom_parser(json_sax_dom_parser&&) = default; + json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete; + json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default; + ~json_sax_dom_parser() = default; + + bool null() + { + handle_value(nullptr); + return true; + } + + bool boolean(bool val) + { + handle_value(val); + return true; + } + + bool number_integer(number_integer_t val) + { + handle_value(val); + return true; + } + + bool number_unsigned(number_unsigned_t val) + { + handle_value(val); + return true; + } + + bool number_float(number_float_t val, const string_t& /*unused*/) + { + handle_value(val); + return true; + } + + bool string(string_t& val) + { + handle_value(val); + return true; + } + + bool start_object(std::size_t len) + { + ref_stack.push_back(handle_value(BasicJsonType::value_t::object)); + + if (JSON_UNLIKELY(len != std::size_t(-1) and len > ref_stack.back()->max_size())) + { + JSON_THROW(out_of_range::create(408, + "excessive object size: " + std::to_string(len))); + } + + return true; + } + + bool key(string_t& val) + { + // add null at given key and store the reference for later + object_element = &(ref_stack.back()->m_value.object->operator[](val)); + return true; + } + + bool end_object() + { + ref_stack.pop_back(); + return true; + } + + bool start_array(std::size_t len) + { + ref_stack.push_back(handle_value(BasicJsonType::value_t::array)); + + if (JSON_UNLIKELY(len != std::size_t(-1) and len > ref_stack.back()->max_size())) + { + JSON_THROW(out_of_range::create(408, + "excessive array size: " + std::to_string(len))); + } + + return true; + } + + bool end_array() + { + ref_stack.pop_back(); + return true; + } + + bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, + const detail::exception& ex) + { + errored = true; + if (allow_exceptions) + { + // determine the proper exception type from the id + switch ((ex.id / 100) % 100) + { + case 1: + JSON_THROW(*static_cast(&ex)); + case 4: + JSON_THROW(*static_cast(&ex)); + // LCOV_EXCL_START + case 2: + JSON_THROW(*static_cast(&ex)); + case 3: + JSON_THROW(*static_cast(&ex)); + case 5: + JSON_THROW(*static_cast(&ex)); + default: + assert(false); + // LCOV_EXCL_STOP + } + } + return false; + } + + constexpr bool is_errored() const + { + return errored; + } + + private: + /*! + @invariant If the ref stack is empty, then the passed value will be the new + root. 
+ @invariant If the ref stack contains a value, then it is an array or an + object to which we can add elements + */ + template + BasicJsonType* handle_value(Value&& v) + { + if (ref_stack.empty()) + { + root = BasicJsonType(std::forward(v)); + return &root; + } + + assert(ref_stack.back()->is_array() or ref_stack.back()->is_object()); + + if (ref_stack.back()->is_array()) + { + ref_stack.back()->m_value.array->emplace_back(std::forward(v)); + return &(ref_stack.back()->m_value.array->back()); + } + + assert(ref_stack.back()->is_object()); + assert(object_element); + *object_element = BasicJsonType(std::forward(v)); + return object_element; + } + + /// the parsed JSON value + BasicJsonType& root; + /// stack to model hierarchy of values + std::vector ref_stack {}; + /// helper to hold the reference for the next object element + BasicJsonType* object_element = nullptr; + /// whether a syntax error occurred + bool errored = false; + /// whether to throw exceptions in case of errors + const bool allow_exceptions = true; +}; + +template +class json_sax_dom_callback_parser +{ + public: + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using parser_callback_t = typename BasicJsonType::parser_callback_t; + using parse_event_t = typename BasicJsonType::parse_event_t; + + json_sax_dom_callback_parser(BasicJsonType& r, + const parser_callback_t cb, + const bool allow_exceptions_ = true) + : root(r), callback(cb), allow_exceptions(allow_exceptions_) + { + keep_stack.push_back(true); + } + + // make class move-only + json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = delete; + json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default; + json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_parser&) = delete; + json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default; + ~json_sax_dom_callback_parser() = default; + + bool null() + { + handle_value(nullptr); + return true; + } + + bool boolean(bool val) + { + handle_value(val); + return true; + } + + bool number_integer(number_integer_t val) + { + handle_value(val); + return true; + } + + bool number_unsigned(number_unsigned_t val) + { + handle_value(val); + return true; + } + + bool number_float(number_float_t val, const string_t& /*unused*/) + { + handle_value(val); + return true; + } + + bool string(string_t& val) + { + handle_value(val); + return true; + } + + bool start_object(std::size_t len) + { + // check callback for object start + const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::object_start, discarded); + keep_stack.push_back(keep); + + auto val = handle_value(BasicJsonType::value_t::object, true); + ref_stack.push_back(val.second); + + // check object limit + if (ref_stack.back() and JSON_UNLIKELY(len != std::size_t(-1) and len > ref_stack.back()->max_size())) + { + JSON_THROW(out_of_range::create(408, "excessive object size: " + std::to_string(len))); + } + + return true; + } + + bool key(string_t& val) + { + BasicJsonType k = BasicJsonType(val); + + // check callback for key + const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::key, k); + key_keep_stack.push_back(keep); + + // add discarded value at given key and store the reference for later + if (keep and ref_stack.back()) + { + object_element = 
&(ref_stack.back()->m_value.object->operator[](val) = discarded); + } + + return true; + } + + bool end_object() + { + if (ref_stack.back() and not callback(static_cast(ref_stack.size()) - 1, parse_event_t::object_end, *ref_stack.back())) + { + // discard object + *ref_stack.back() = discarded; + } + + assert(not ref_stack.empty()); + assert(not keep_stack.empty()); + ref_stack.pop_back(); + keep_stack.pop_back(); + + if (not ref_stack.empty() and ref_stack.back() and ref_stack.back()->is_object()) + { + // remove discarded value + for (auto it = ref_stack.back()->begin(); it != ref_stack.back()->end(); ++it) + { + if (it->is_discarded()) + { + ref_stack.back()->erase(it); + break; + } + } + } + + return true; + } + + bool start_array(std::size_t len) + { + const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::array_start, discarded); + keep_stack.push_back(keep); + + auto val = handle_value(BasicJsonType::value_t::array, true); + ref_stack.push_back(val.second); + + // check array limit + if (ref_stack.back() and JSON_UNLIKELY(len != std::size_t(-1) and len > ref_stack.back()->max_size())) + { + JSON_THROW(out_of_range::create(408, "excessive array size: " + std::to_string(len))); + } + + return true; + } + + bool end_array() + { + bool keep = true; + + if (ref_stack.back()) + { + keep = callback(static_cast(ref_stack.size()) - 1, parse_event_t::array_end, *ref_stack.back()); + if (not keep) + { + // discard array + *ref_stack.back() = discarded; + } + } + + assert(not ref_stack.empty()); + assert(not keep_stack.empty()); + ref_stack.pop_back(); + keep_stack.pop_back(); + + // remove discarded value + if (not keep and not ref_stack.empty() and ref_stack.back()->is_array()) + { + ref_stack.back()->m_value.array->pop_back(); + } + + return true; + } + + bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, + const detail::exception& ex) + { + errored = true; + if (allow_exceptions) + { + // determine the proper exception type from the id + switch ((ex.id / 100) % 100) + { + case 1: + JSON_THROW(*static_cast(&ex)); + case 4: + JSON_THROW(*static_cast(&ex)); + // LCOV_EXCL_START + case 2: + JSON_THROW(*static_cast(&ex)); + case 3: + JSON_THROW(*static_cast(&ex)); + case 5: + JSON_THROW(*static_cast(&ex)); + default: + assert(false); + // LCOV_EXCL_STOP + } + } + return false; + } + + constexpr bool is_errored() const + { + return errored; + } + + private: + /*! + @param[in] v value to add to the JSON value we build during parsing + @param[in] skip_callback whether we should skip calling the callback + function; this is required after start_array() and + start_object() SAX events, because otherwise we would call the + callback function with an empty array or object, respectively. + + @invariant If the ref stack is empty, then the passed value will be the new + root. 
+ @invariant If the ref stack contains a value, then it is an array or an + object to which we can add elements + + @return pair of boolean (whether value should be kept) and pointer (to the + passed value in the ref_stack hierarchy; nullptr if not kept) + */ + template + std::pair handle_value(Value&& v, const bool skip_callback = false) + { + assert(not keep_stack.empty()); + + // do not handle this value if we know it would be added to a discarded + // container + if (not keep_stack.back()) + { + return {false, nullptr}; + } + + // create value + auto value = BasicJsonType(std::forward(v)); + + // check callback + const bool keep = skip_callback or callback(static_cast(ref_stack.size()), parse_event_t::value, value); + + // do not handle this value if we just learnt it shall be discarded + if (not keep) + { + return {false, nullptr}; + } + + if (ref_stack.empty()) + { + root = std::move(value); + return {true, &root}; + } + + // skip this value if we already decided to skip the parent + // (https://github.com/nlohmann/json/issues/971#issuecomment-413678360) + if (not ref_stack.back()) + { + return {false, nullptr}; + } + + // we now only expect arrays and objects + assert(ref_stack.back()->is_array() or ref_stack.back()->is_object()); + + // array + if (ref_stack.back()->is_array()) + { + ref_stack.back()->m_value.array->push_back(std::move(value)); + return {true, &(ref_stack.back()->m_value.array->back())}; + } + + // object + assert(ref_stack.back()->is_object()); + // check if we should store an element for the current key + assert(not key_keep_stack.empty()); + const bool store_element = key_keep_stack.back(); + key_keep_stack.pop_back(); + + if (not store_element) + { + return {false, nullptr}; + } + + assert(object_element); + *object_element = std::move(value); + return {true, object_element}; + } + + /// the parsed JSON value + BasicJsonType& root; + /// stack to model hierarchy of values + std::vector ref_stack {}; + /// stack to manage which values to keep + std::vector keep_stack {}; + /// stack to manage which object keys to keep + std::vector key_keep_stack {}; + /// helper to hold the reference for the next object element + BasicJsonType* object_element = nullptr; + /// whether a syntax error occurred + bool errored = false; + /// callback function + const parser_callback_t callback = nullptr; + /// whether to throw exceptions in case of errors + const bool allow_exceptions = true; + /// a discarded value for the callback + BasicJsonType discarded = BasicJsonType::value_t::discarded; +}; + +template +class json_sax_acceptor +{ + public: + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + + bool null() + { + return true; + } + + bool boolean(bool /*unused*/) + { + return true; + } + + bool number_integer(number_integer_t /*unused*/) + { + return true; + } + + bool number_unsigned(number_unsigned_t /*unused*/) + { + return true; + } + + bool number_float(number_float_t /*unused*/, const string_t& /*unused*/) + { + return true; + } + + bool string(string_t& /*unused*/) + { + return true; + } + + bool start_object(std::size_t /*unused*/ = std::size_t(-1)) + { + return true; + } + + bool key(string_t& /*unused*/) + { + return true; + } + + bool end_object() + { + return true; + } + + bool start_array(std::size_t /*unused*/ = std::size_t(-1)) + { + return true; + } + + bool 
end_array() + { + return true; + } + + bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const detail::exception& /*unused*/) + { + return false; + } +}; +} // namespace detail + +} // namespace nlohmann + +// #include + +// #include + + +#include // size_t +#include // declval +#include // string + +// #include + +// #include + + +namespace nlohmann +{ +namespace detail +{ +template +using null_function_t = decltype(std::declval().null()); + +template +using boolean_function_t = + decltype(std::declval().boolean(std::declval())); + +template +using number_integer_function_t = + decltype(std::declval().number_integer(std::declval())); + +template +using number_unsigned_function_t = + decltype(std::declval().number_unsigned(std::declval())); + +template +using number_float_function_t = decltype(std::declval().number_float( + std::declval(), std::declval())); + +template +using string_function_t = + decltype(std::declval().string(std::declval())); + +template +using start_object_function_t = + decltype(std::declval().start_object(std::declval())); + +template +using key_function_t = + decltype(std::declval().key(std::declval())); + +template +using end_object_function_t = decltype(std::declval().end_object()); + +template +using start_array_function_t = + decltype(std::declval().start_array(std::declval())); + +template +using end_array_function_t = decltype(std::declval().end_array()); + +template +using parse_error_function_t = decltype(std::declval().parse_error( + std::declval(), std::declval(), + std::declval())); + +template +struct is_sax +{ + private: + static_assert(is_basic_json::value, + "BasicJsonType must be of type basic_json<...>"); + + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using exception_t = typename BasicJsonType::exception; + + public: + static constexpr bool value = + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value; +}; + +template +struct is_sax_static_asserts +{ + private: + static_assert(is_basic_json::value, + "BasicJsonType must be of type basic_json<...>"); + + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using exception_t = typename BasicJsonType::exception; + + public: + static_assert(is_detected_exact::value, + "Missing/invalid function: bool null()"); + static_assert(is_detected_exact::value, + "Missing/invalid function: bool boolean(bool)"); + static_assert(is_detected_exact::value, + "Missing/invalid function: bool boolean(bool)"); + static_assert( + is_detected_exact::value, + "Missing/invalid function: bool number_integer(number_integer_t)"); + static_assert( + is_detected_exact::value, + "Missing/invalid function: bool number_unsigned(number_unsigned_t)"); + static_assert(is_detected_exact::value, + "Missing/invalid function: bool number_float(number_float_t, const string_t&)"); + static_assert( + 
is_detected_exact::value, + "Missing/invalid function: bool string(string_t&)"); + static_assert(is_detected_exact::value, + "Missing/invalid function: bool start_object(std::size_t)"); + static_assert(is_detected_exact::value, + "Missing/invalid function: bool key(string_t&)"); + static_assert(is_detected_exact::value, + "Missing/invalid function: bool end_object()"); + static_assert(is_detected_exact::value, + "Missing/invalid function: bool start_array(std::size_t)"); + static_assert(is_detected_exact::value, + "Missing/invalid function: bool end_array()"); + static_assert( + is_detected_exact::value, + "Missing/invalid function: bool parse_error(std::size_t, const " + "std::string&, const exception&)"); +}; +} // namespace detail +} // namespace nlohmann + +// #include + + +namespace nlohmann +{ +namespace detail +{ +/////////////////// +// binary reader // +/////////////////// + +/*! +@brief deserialization of CBOR, MessagePack, and UBJSON values +*/ +template> +class binary_reader +{ + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using json_sax_t = SAX; + + public: + /*! + @brief create a binary reader + + @param[in] adapter input adapter to read from + */ + explicit binary_reader(input_adapter_t adapter) : ia(std::move(adapter)) + { + (void)detail::is_sax_static_asserts {}; + assert(ia); + } + + // make class move-only + binary_reader(const binary_reader&) = delete; + binary_reader(binary_reader&&) = default; + binary_reader& operator=(const binary_reader&) = delete; + binary_reader& operator=(binary_reader&&) = default; + ~binary_reader() = default; + + /*! + @param[in] format the binary format to parse + @param[in] sax_ a SAX event processor + @param[in] strict whether to expect the input to be consumed completed + + @return + */ + bool sax_parse(const input_format_t format, + json_sax_t* sax_, + const bool strict = true) + { + sax = sax_; + bool result = false; + + switch (format) + { + case input_format_t::bson: + result = parse_bson_internal(); + break; + + case input_format_t::cbor: + result = parse_cbor_internal(); + break; + + case input_format_t::msgpack: + result = parse_msgpack_internal(); + break; + + case input_format_t::ubjson: + result = parse_ubjson_internal(); + break; + + default: // LCOV_EXCL_LINE + assert(false); // LCOV_EXCL_LINE + } + + // strict mode: next byte must be EOF + if (result and strict) + { + if (format == input_format_t::ubjson) + { + get_ignore_noop(); + } + else + { + get(); + } + + if (JSON_UNLIKELY(current != std::char_traits::eof())) + { + return sax->parse_error(chars_read, get_token_string(), + parse_error::create(110, chars_read, exception_message(format, "expected end of input; last byte: 0x" + get_token_string(), "value"))); + } + } + + return result; + } + + /*! + @brief determine system byte order + + @return true if and only if system's byte order is little endian + + @note from http://stackoverflow.com/a/1001328/266378 + */ + static constexpr bool little_endianess(int num = 1) noexcept + { + return *reinterpret_cast(&num) == 1; + } + + private: + ////////// + // BSON // + ////////// + + /*! + @brief Reads in a BSON-object and passes it to the SAX-parser. 
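+
+    A BSON document is a little-endian int32 total size, a list of elements
+    and a terminating 0x00 byte. As a minimal illustration (reaching this
+    reader e.g. through the public json::from_bson interface), the twelve
+    bytes
+
+        0x0C 0x00 0x00 0x00   (total document size, 12)
+        0x10 'a' 0x00         (element type 0x10 = int32, key "a")
+        0x7B 0x00 0x00 0x00   (value 123)
+        0x00                  (end of element list)
+
+    parse to the JSON value {"a": 123}.
+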
+ @return whether a valid BSON-value was passed to the SAX parser + */ + bool parse_bson_internal() + { + std::int32_t document_size; + get_number(input_format_t::bson, document_size); + + if (JSON_UNLIKELY(not sax->start_object(std::size_t(-1)))) + { + return false; + } + + if (JSON_UNLIKELY(not parse_bson_element_list(/*is_array*/false))) + { + return false; + } + + return sax->end_object(); + } + + /*! + @brief Parses a C-style string from the BSON input. + @param[in, out] result A reference to the string variable where the read + string is to be stored. + @return `true` if the \x00-byte indicating the end of the string was + encountered before the EOF; false` indicates an unexpected EOF. + */ + bool get_bson_cstr(string_t& result) + { + auto out = std::back_inserter(result); + while (true) + { + get(); + if (JSON_UNLIKELY(not unexpect_eof(input_format_t::bson, "cstring"))) + { + return false; + } + if (current == 0x00) + { + return true; + } + *out++ = static_cast(current); + } + + return true; + } + + /*! + @brief Parses a zero-terminated string of length @a len from the BSON + input. + @param[in] len The length (including the zero-byte at the end) of the + string to be read. + @param[in, out] result A reference to the string variable where the read + string is to be stored. + @tparam NumberType The type of the length @a len + @pre len >= 1 + @return `true` if the string was successfully parsed + */ + template + bool get_bson_string(const NumberType len, string_t& result) + { + if (JSON_UNLIKELY(len < 1)) + { + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::bson, "string length must be at least 1, is " + std::to_string(len), "string"))); + } + + return get_string(input_format_t::bson, len - static_cast(1), result) and get() != std::char_traits::eof(); + } + + /*! + @brief Read a BSON document element of the given @a element_type. + @param[in] element_type The BSON element type, c.f. http://bsonspec.org/spec.html + @param[in] element_type_parse_position The position in the input stream, + where the `element_type` was read. + @warning Not all BSON element types are supported yet. An unsupported + @a element_type will give rise to a parse_error.114: + Unsupported BSON record type 0x... 
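+             For example (element type bytes as defined by the BSON
+             specification; not an exhaustive list), binary (0x05),
+             ObjectId (0x07) and UTC datetime (0x09) elements are not
+             handled below and therefore trigger this error.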
+ @return whether a valid BSON-object/array was passed to the SAX parser + */ + bool parse_bson_element_internal(const int element_type, + const std::size_t element_type_parse_position) + { + switch (element_type) + { + case 0x01: // double + { + double number; + return get_number(input_format_t::bson, number) and sax->number_float(static_cast(number), ""); + } + + case 0x02: // string + { + std::int32_t len; + string_t value; + return get_number(input_format_t::bson, len) and get_bson_string(len, value) and sax->string(value); + } + + case 0x03: // object + { + return parse_bson_internal(); + } + + case 0x04: // array + { + return parse_bson_array(); + } + + case 0x08: // boolean + { + return sax->boolean(get() != 0); + } + + case 0x0A: // null + { + return sax->null(); + } + + case 0x10: // int32 + { + std::int32_t value; + return get_number(input_format_t::bson, value) and sax->number_integer(value); + } + + case 0x12: // int64 + { + std::int64_t value; + return get_number(input_format_t::bson, value) and sax->number_integer(value); + } + + default: // anything else not supported (yet) + { + std::array cr{{}}; + (std::snprintf)(cr.data(), cr.size(), "%.2hhX", static_cast(element_type)); + return sax->parse_error(element_type_parse_position, std::string(cr.data()), parse_error::create(114, element_type_parse_position, "Unsupported BSON record type 0x" + std::string(cr.data()))); + } + } + } + + /*! + @brief Read a BSON element list (as specified in the BSON-spec) + + The same binary layout is used for objects and arrays, hence it must be + indicated with the argument @a is_array which one is expected + (true --> array, false --> object). + + @param[in] is_array Determines if the element list being read is to be + treated as an object (@a is_array == false), or as an + array (@a is_array == true). + @return whether a valid BSON-object/array was passed to the SAX parser + */ + bool parse_bson_element_list(const bool is_array) + { + string_t key; + while (int element_type = get()) + { + if (JSON_UNLIKELY(not unexpect_eof(input_format_t::bson, "element list"))) + { + return false; + } + + const std::size_t element_type_parse_position = chars_read; + if (JSON_UNLIKELY(not get_bson_cstr(key))) + { + return false; + } + + if (not is_array and not sax->key(key)) + { + return false; + } + + if (JSON_UNLIKELY(not parse_bson_element_internal(element_type, element_type_parse_position))) + { + return false; + } + + // get_bson_cstr only appends + key.clear(); + } + + return true; + } + + /*! + @brief Reads an array from the BSON input and passes it to the SAX-parser. + @return whether a valid BSON-array was passed to the SAX parser + */ + bool parse_bson_array() + { + std::int32_t document_size; + get_number(input_format_t::bson, document_size); + + if (JSON_UNLIKELY(not sax->start_array(std::size_t(-1)))) + { + return false; + } + + if (JSON_UNLIKELY(not parse_bson_element_list(/*is_array*/true))) + { + return false; + } + + return sax->end_array(); + } + + ////////// + // CBOR // + ////////// + + /*! + @param[in] get_char whether a new character should be retrieved from the + input (true, default) or whether the last read + character should be considered instead + + @return whether a valid CBOR value was passed to the SAX parser + */ + bool parse_cbor_internal(const bool get_char = true) + { + switch (get_char ? 
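+                // get_char == false is used when reading the elements of an
+                // indefinite-length array: the caller has already fetched the
+                // next byte to test for the 0xFF "break", so that byte is
+                // reused via 'current' instead of consuming a new one here.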
get() : current) + { + // EOF + case std::char_traits::eof(): + return unexpect_eof(input_format_t::cbor, "value"); + + // Integer 0x00..0x17 (0..23) + case 0x00: + case 0x01: + case 0x02: + case 0x03: + case 0x04: + case 0x05: + case 0x06: + case 0x07: + case 0x08: + case 0x09: + case 0x0A: + case 0x0B: + case 0x0C: + case 0x0D: + case 0x0E: + case 0x0F: + case 0x10: + case 0x11: + case 0x12: + case 0x13: + case 0x14: + case 0x15: + case 0x16: + case 0x17: + return sax->number_unsigned(static_cast(current)); + + case 0x18: // Unsigned integer (one-byte uint8_t follows) + { + std::uint8_t number; + return get_number(input_format_t::cbor, number) and sax->number_unsigned(number); + } + + case 0x19: // Unsigned integer (two-byte uint16_t follows) + { + std::uint16_t number; + return get_number(input_format_t::cbor, number) and sax->number_unsigned(number); + } + + case 0x1A: // Unsigned integer (four-byte uint32_t follows) + { + std::uint32_t number; + return get_number(input_format_t::cbor, number) and sax->number_unsigned(number); + } + + case 0x1B: // Unsigned integer (eight-byte uint64_t follows) + { + std::uint64_t number; + return get_number(input_format_t::cbor, number) and sax->number_unsigned(number); + } + + // Negative integer -1-0x00..-1-0x17 (-1..-24) + case 0x20: + case 0x21: + case 0x22: + case 0x23: + case 0x24: + case 0x25: + case 0x26: + case 0x27: + case 0x28: + case 0x29: + case 0x2A: + case 0x2B: + case 0x2C: + case 0x2D: + case 0x2E: + case 0x2F: + case 0x30: + case 0x31: + case 0x32: + case 0x33: + case 0x34: + case 0x35: + case 0x36: + case 0x37: + return sax->number_integer(static_cast(0x20 - 1 - current)); + + case 0x38: // Negative integer (one-byte uint8_t follows) + { + std::uint8_t number; + return get_number(input_format_t::cbor, number) and sax->number_integer(static_cast(-1) - number); + } + + case 0x39: // Negative integer -1-n (two-byte uint16_t follows) + { + std::uint16_t number; + return get_number(input_format_t::cbor, number) and sax->number_integer(static_cast(-1) - number); + } + + case 0x3A: // Negative integer -1-n (four-byte uint32_t follows) + { + std::uint32_t number; + return get_number(input_format_t::cbor, number) and sax->number_integer(static_cast(-1) - number); + } + + case 0x3B: // Negative integer -1-n (eight-byte uint64_t follows) + { + std::uint64_t number; + return get_number(input_format_t::cbor, number) and sax->number_integer(static_cast(-1) + - static_cast(number)); + } + + // UTF-8 string (0x00..0x17 bytes follow) + case 0x60: + case 0x61: + case 0x62: + case 0x63: + case 0x64: + case 0x65: + case 0x66: + case 0x67: + case 0x68: + case 0x69: + case 0x6A: + case 0x6B: + case 0x6C: + case 0x6D: + case 0x6E: + case 0x6F: + case 0x70: + case 0x71: + case 0x72: + case 0x73: + case 0x74: + case 0x75: + case 0x76: + case 0x77: + case 0x78: // UTF-8 string (one-byte uint8_t for n follows) + case 0x79: // UTF-8 string (two-byte uint16_t for n follow) + case 0x7A: // UTF-8 string (four-byte uint32_t for n follow) + case 0x7B: // UTF-8 string (eight-byte uint64_t for n follow) + case 0x7F: // UTF-8 string (indefinite length) + { + string_t s; + return get_cbor_string(s) and sax->string(s); + } + + // array (0x00..0x17 data items follow) + case 0x80: + case 0x81: + case 0x82: + case 0x83: + case 0x84: + case 0x85: + case 0x86: + case 0x87: + case 0x88: + case 0x89: + case 0x8A: + case 0x8B: + case 0x8C: + case 0x8D: + case 0x8E: + case 0x8F: + case 0x90: + case 0x91: + case 0x92: + case 0x93: + case 0x94: + case 0x95: + case 0x96: + case 
0x97: + return get_cbor_array(static_cast(static_cast(current) & 0x1Fu)); + + case 0x98: // array (one-byte uint8_t for n follows) + { + std::uint8_t len; + return get_number(input_format_t::cbor, len) and get_cbor_array(static_cast(len)); + } + + case 0x99: // array (two-byte uint16_t for n follow) + { + std::uint16_t len; + return get_number(input_format_t::cbor, len) and get_cbor_array(static_cast(len)); + } + + case 0x9A: // array (four-byte uint32_t for n follow) + { + std::uint32_t len; + return get_number(input_format_t::cbor, len) and get_cbor_array(static_cast(len)); + } + + case 0x9B: // array (eight-byte uint64_t for n follow) + { + std::uint64_t len; + return get_number(input_format_t::cbor, len) and get_cbor_array(static_cast(len)); + } + + case 0x9F: // array (indefinite length) + return get_cbor_array(std::size_t(-1)); + + // map (0x00..0x17 pairs of data items follow) + case 0xA0: + case 0xA1: + case 0xA2: + case 0xA3: + case 0xA4: + case 0xA5: + case 0xA6: + case 0xA7: + case 0xA8: + case 0xA9: + case 0xAA: + case 0xAB: + case 0xAC: + case 0xAD: + case 0xAE: + case 0xAF: + case 0xB0: + case 0xB1: + case 0xB2: + case 0xB3: + case 0xB4: + case 0xB5: + case 0xB6: + case 0xB7: + return get_cbor_object(static_cast(static_cast(current) & 0x1Fu)); + + case 0xB8: // map (one-byte uint8_t for n follows) + { + std::uint8_t len; + return get_number(input_format_t::cbor, len) and get_cbor_object(static_cast(len)); + } + + case 0xB9: // map (two-byte uint16_t for n follow) + { + std::uint16_t len; + return get_number(input_format_t::cbor, len) and get_cbor_object(static_cast(len)); + } + + case 0xBA: // map (four-byte uint32_t for n follow) + { + std::uint32_t len; + return get_number(input_format_t::cbor, len) and get_cbor_object(static_cast(len)); + } + + case 0xBB: // map (eight-byte uint64_t for n follow) + { + std::uint64_t len; + return get_number(input_format_t::cbor, len) and get_cbor_object(static_cast(len)); + } + + case 0xBF: // map (indefinite length) + return get_cbor_object(std::size_t(-1)); + + case 0xF4: // false + return sax->boolean(false); + + case 0xF5: // true + return sax->boolean(true); + + case 0xF6: // null + return sax->null(); + + case 0xF9: // Half-Precision Float (two-byte IEEE 754) + { + const int byte1_raw = get(); + if (JSON_UNLIKELY(not unexpect_eof(input_format_t::cbor, "number"))) + { + return false; + } + const int byte2_raw = get(); + if (JSON_UNLIKELY(not unexpect_eof(input_format_t::cbor, "number"))) + { + return false; + } + + const auto byte1 = static_cast(byte1_raw); + const auto byte2 = static_cast(byte2_raw); + + // code from RFC 7049, Appendix D, Figure 3: + // As half-precision floating-point numbers were only added + // to IEEE 754 in 2008, today's programming platforms often + // still only have limited support for them. It is very + // easy to include at least decoding support for them even + // without such support. An example of a small decoder for + // half-precision floating-point numbers in the C language + // is shown in Fig. 3. + const auto half = static_cast((byte1 << 8u) + byte2); + const double val = [&half] + { + const int exp = (half >> 10u) & 0x1Fu; + const unsigned int mant = half & 0x3FFu; + assert(0 <= exp and exp <= 32); + assert(0 <= mant and mant <= 1024); + switch (exp) + { + case 0: + return std::ldexp(mant, -24); + case 31: + return (mant == 0) + ? 
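+                                     // exp == 31 encodes the non-finite half-precision
+                                     // values: a zero mantissa is an infinity (e.g. the
+                                     // half 0x7C00), a non-zero mantissa a NaN (0x7E00).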
std::numeric_limits::infinity() + : std::numeric_limits::quiet_NaN(); + default: + return std::ldexp(mant + 1024, exp - 25); + } + }(); + return sax->number_float((half & 0x8000u) != 0 + ? static_cast(-val) + : static_cast(val), ""); + } + + case 0xFA: // Single-Precision Float (four-byte IEEE 754) + { + float number; + return get_number(input_format_t::cbor, number) and sax->number_float(static_cast(number), ""); + } + + case 0xFB: // Double-Precision Float (eight-byte IEEE 754) + { + double number; + return get_number(input_format_t::cbor, number) and sax->number_float(static_cast(number), ""); + } + + default: // anything else (0xFF is handled inside the other types) + { + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::cbor, "invalid byte: 0x" + last_token, "value"))); + } + } + } + + /*! + @brief reads a CBOR string + + This function first reads starting bytes to determine the expected + string length and then copies this number of bytes into a string. + Additionally, CBOR's strings with indefinite lengths are supported. + + @param[out] result created string + + @return whether string creation completed + */ + bool get_cbor_string(string_t& result) + { + if (JSON_UNLIKELY(not unexpect_eof(input_format_t::cbor, "string"))) + { + return false; + } + + switch (current) + { + // UTF-8 string (0x00..0x17 bytes follow) + case 0x60: + case 0x61: + case 0x62: + case 0x63: + case 0x64: + case 0x65: + case 0x66: + case 0x67: + case 0x68: + case 0x69: + case 0x6A: + case 0x6B: + case 0x6C: + case 0x6D: + case 0x6E: + case 0x6F: + case 0x70: + case 0x71: + case 0x72: + case 0x73: + case 0x74: + case 0x75: + case 0x76: + case 0x77: + { + return get_string(input_format_t::cbor, static_cast(current) & 0x1Fu, result); + } + + case 0x78: // UTF-8 string (one-byte uint8_t for n follows) + { + std::uint8_t len; + return get_number(input_format_t::cbor, len) and get_string(input_format_t::cbor, len, result); + } + + case 0x79: // UTF-8 string (two-byte uint16_t for n follow) + { + std::uint16_t len; + return get_number(input_format_t::cbor, len) and get_string(input_format_t::cbor, len, result); + } + + case 0x7A: // UTF-8 string (four-byte uint32_t for n follow) + { + std::uint32_t len; + return get_number(input_format_t::cbor, len) and get_string(input_format_t::cbor, len, result); + } + + case 0x7B: // UTF-8 string (eight-byte uint64_t for n follow) + { + std::uint64_t len; + return get_number(input_format_t::cbor, len) and get_string(input_format_t::cbor, len, result); + } + + case 0x7F: // UTF-8 string (indefinite length) + { + while (get() != 0xFF) + { + string_t chunk; + if (not get_cbor_string(chunk)) + { + return false; + } + result.append(chunk); + } + return true; + } + + default: + { + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::cbor, "expected length specification (0x60-0x7B) or indefinite string type (0x7F); last byte: 0x" + last_token, "string"))); + } + } + } + + /*! 
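+    @brief reads a CBOR array and passes it to the SAX parser
+
+    As a small illustration of the encodings that end up here: for the input
+    0x83 0x01 0x02 0x03 the initial byte (major type 4, additional
+    information 3) makes parse_cbor_internal() call this function with
+    len == 3, producing [1,2,3]; an indefinite-length array instead starts
+    with 0x9F and is read element by element until the 0xFF "break" byte.
+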
+ @param[in] len the length of the array or std::size_t(-1) for an + array of indefinite size + @return whether array creation completed + */ + bool get_cbor_array(const std::size_t len) + { + if (JSON_UNLIKELY(not sax->start_array(len))) + { + return false; + } + + if (len != std::size_t(-1)) + { + for (std::size_t i = 0; i < len; ++i) + { + if (JSON_UNLIKELY(not parse_cbor_internal())) + { + return false; + } + } + } + else + { + while (get() != 0xFF) + { + if (JSON_UNLIKELY(not parse_cbor_internal(false))) + { + return false; + } + } + } + + return sax->end_array(); + } + + /*! + @param[in] len the length of the object or std::size_t(-1) for an + object of indefinite size + @return whether object creation completed + */ + bool get_cbor_object(const std::size_t len) + { + if (JSON_UNLIKELY(not sax->start_object(len))) + { + return false; + } + + string_t key; + if (len != std::size_t(-1)) + { + for (std::size_t i = 0; i < len; ++i) + { + get(); + if (JSON_UNLIKELY(not get_cbor_string(key) or not sax->key(key))) + { + return false; + } + + if (JSON_UNLIKELY(not parse_cbor_internal())) + { + return false; + } + key.clear(); + } + } + else + { + while (get() != 0xFF) + { + if (JSON_UNLIKELY(not get_cbor_string(key) or not sax->key(key))) + { + return false; + } + + if (JSON_UNLIKELY(not parse_cbor_internal())) + { + return false; + } + key.clear(); + } + } + + return sax->end_object(); + } + + ///////////// + // MsgPack // + ///////////// + + /*! + @return whether a valid MessagePack value was passed to the SAX parser + */ + bool parse_msgpack_internal() + { + switch (get()) + { + // EOF + case std::char_traits::eof(): + return unexpect_eof(input_format_t::msgpack, "value"); + + // positive fixint + case 0x00: + case 0x01: + case 0x02: + case 0x03: + case 0x04: + case 0x05: + case 0x06: + case 0x07: + case 0x08: + case 0x09: + case 0x0A: + case 0x0B: + case 0x0C: + case 0x0D: + case 0x0E: + case 0x0F: + case 0x10: + case 0x11: + case 0x12: + case 0x13: + case 0x14: + case 0x15: + case 0x16: + case 0x17: + case 0x18: + case 0x19: + case 0x1A: + case 0x1B: + case 0x1C: + case 0x1D: + case 0x1E: + case 0x1F: + case 0x20: + case 0x21: + case 0x22: + case 0x23: + case 0x24: + case 0x25: + case 0x26: + case 0x27: + case 0x28: + case 0x29: + case 0x2A: + case 0x2B: + case 0x2C: + case 0x2D: + case 0x2E: + case 0x2F: + case 0x30: + case 0x31: + case 0x32: + case 0x33: + case 0x34: + case 0x35: + case 0x36: + case 0x37: + case 0x38: + case 0x39: + case 0x3A: + case 0x3B: + case 0x3C: + case 0x3D: + case 0x3E: + case 0x3F: + case 0x40: + case 0x41: + case 0x42: + case 0x43: + case 0x44: + case 0x45: + case 0x46: + case 0x47: + case 0x48: + case 0x49: + case 0x4A: + case 0x4B: + case 0x4C: + case 0x4D: + case 0x4E: + case 0x4F: + case 0x50: + case 0x51: + case 0x52: + case 0x53: + case 0x54: + case 0x55: + case 0x56: + case 0x57: + case 0x58: + case 0x59: + case 0x5A: + case 0x5B: + case 0x5C: + case 0x5D: + case 0x5E: + case 0x5F: + case 0x60: + case 0x61: + case 0x62: + case 0x63: + case 0x64: + case 0x65: + case 0x66: + case 0x67: + case 0x68: + case 0x69: + case 0x6A: + case 0x6B: + case 0x6C: + case 0x6D: + case 0x6E: + case 0x6F: + case 0x70: + case 0x71: + case 0x72: + case 0x73: + case 0x74: + case 0x75: + case 0x76: + case 0x77: + case 0x78: + case 0x79: + case 0x7A: + case 0x7B: + case 0x7C: + case 0x7D: + case 0x7E: + case 0x7F: + return sax->number_unsigned(static_cast(current)); + + // fixmap + case 0x80: + case 0x81: + case 0x82: + case 0x83: + case 0x84: + case 0x85: + case 0x86: + case 
0x87: + case 0x88: + case 0x89: + case 0x8A: + case 0x8B: + case 0x8C: + case 0x8D: + case 0x8E: + case 0x8F: + return get_msgpack_object(static_cast(static_cast(current) & 0x0Fu)); + + // fixarray + case 0x90: + case 0x91: + case 0x92: + case 0x93: + case 0x94: + case 0x95: + case 0x96: + case 0x97: + case 0x98: + case 0x99: + case 0x9A: + case 0x9B: + case 0x9C: + case 0x9D: + case 0x9E: + case 0x9F: + return get_msgpack_array(static_cast(static_cast(current) & 0x0Fu)); + + // fixstr + case 0xA0: + case 0xA1: + case 0xA2: + case 0xA3: + case 0xA4: + case 0xA5: + case 0xA6: + case 0xA7: + case 0xA8: + case 0xA9: + case 0xAA: + case 0xAB: + case 0xAC: + case 0xAD: + case 0xAE: + case 0xAF: + case 0xB0: + case 0xB1: + case 0xB2: + case 0xB3: + case 0xB4: + case 0xB5: + case 0xB6: + case 0xB7: + case 0xB8: + case 0xB9: + case 0xBA: + case 0xBB: + case 0xBC: + case 0xBD: + case 0xBE: + case 0xBF: + { + string_t s; + return get_msgpack_string(s) and sax->string(s); + } + + case 0xC0: // nil + return sax->null(); + + case 0xC2: // false + return sax->boolean(false); + + case 0xC3: // true + return sax->boolean(true); + + case 0xCA: // float 32 + { + float number; + return get_number(input_format_t::msgpack, number) and sax->number_float(static_cast(number), ""); + } + + case 0xCB: // float 64 + { + double number; + return get_number(input_format_t::msgpack, number) and sax->number_float(static_cast(number), ""); + } + + case 0xCC: // uint 8 + { + std::uint8_t number; + return get_number(input_format_t::msgpack, number) and sax->number_unsigned(number); + } + + case 0xCD: // uint 16 + { + std::uint16_t number; + return get_number(input_format_t::msgpack, number) and sax->number_unsigned(number); + } + + case 0xCE: // uint 32 + { + std::uint32_t number; + return get_number(input_format_t::msgpack, number) and sax->number_unsigned(number); + } + + case 0xCF: // uint 64 + { + std::uint64_t number; + return get_number(input_format_t::msgpack, number) and sax->number_unsigned(number); + } + + case 0xD0: // int 8 + { + std::int8_t number; + return get_number(input_format_t::msgpack, number) and sax->number_integer(number); + } + + case 0xD1: // int 16 + { + std::int16_t number; + return get_number(input_format_t::msgpack, number) and sax->number_integer(number); + } + + case 0xD2: // int 32 + { + std::int32_t number; + return get_number(input_format_t::msgpack, number) and sax->number_integer(number); + } + + case 0xD3: // int 64 + { + std::int64_t number; + return get_number(input_format_t::msgpack, number) and sax->number_integer(number); + } + + case 0xD9: // str 8 + case 0xDA: // str 16 + case 0xDB: // str 32 + { + string_t s; + return get_msgpack_string(s) and sax->string(s); + } + + case 0xDC: // array 16 + { + std::uint16_t len; + return get_number(input_format_t::msgpack, len) and get_msgpack_array(static_cast(len)); + } + + case 0xDD: // array 32 + { + std::uint32_t len; + return get_number(input_format_t::msgpack, len) and get_msgpack_array(static_cast(len)); + } + + case 0xDE: // map 16 + { + std::uint16_t len; + return get_number(input_format_t::msgpack, len) and get_msgpack_object(static_cast(len)); + } + + case 0xDF: // map 32 + { + std::uint32_t len; + return get_number(input_format_t::msgpack, len) and get_msgpack_object(static_cast(len)); + } + + // negative fixint + case 0xE0: + case 0xE1: + case 0xE2: + case 0xE3: + case 0xE4: + case 0xE5: + case 0xE6: + case 0xE7: + case 0xE8: + case 0xE9: + case 0xEA: + case 0xEB: + case 0xEC: + case 0xED: + case 0xEE: + case 0xEF: + case 0xF0: + 
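+            // (0xE0..0xFF are two's-complement negative fixints; the cast to
+            //  std::int8_t below maps them to the range -32..-1, e.g.
+            //  0xE0 -> -32 and 0xFF -> -1)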
case 0xF1: + case 0xF2: + case 0xF3: + case 0xF4: + case 0xF5: + case 0xF6: + case 0xF7: + case 0xF8: + case 0xF9: + case 0xFA: + case 0xFB: + case 0xFC: + case 0xFD: + case 0xFE: + case 0xFF: + return sax->number_integer(static_cast(current)); + + default: // anything else + { + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::msgpack, "invalid byte: 0x" + last_token, "value"))); + } + } + } + + /*! + @brief reads a MessagePack string + + This function first reads starting bytes to determine the expected + string length and then copies this number of bytes into a string. + + @param[out] result created string + + @return whether string creation completed + */ + bool get_msgpack_string(string_t& result) + { + if (JSON_UNLIKELY(not unexpect_eof(input_format_t::msgpack, "string"))) + { + return false; + } + + switch (current) + { + // fixstr + case 0xA0: + case 0xA1: + case 0xA2: + case 0xA3: + case 0xA4: + case 0xA5: + case 0xA6: + case 0xA7: + case 0xA8: + case 0xA9: + case 0xAA: + case 0xAB: + case 0xAC: + case 0xAD: + case 0xAE: + case 0xAF: + case 0xB0: + case 0xB1: + case 0xB2: + case 0xB3: + case 0xB4: + case 0xB5: + case 0xB6: + case 0xB7: + case 0xB8: + case 0xB9: + case 0xBA: + case 0xBB: + case 0xBC: + case 0xBD: + case 0xBE: + case 0xBF: + { + return get_string(input_format_t::msgpack, static_cast(current) & 0x1Fu, result); + } + + case 0xD9: // str 8 + { + std::uint8_t len; + return get_number(input_format_t::msgpack, len) and get_string(input_format_t::msgpack, len, result); + } + + case 0xDA: // str 16 + { + std::uint16_t len; + return get_number(input_format_t::msgpack, len) and get_string(input_format_t::msgpack, len, result); + } + + case 0xDB: // str 32 + { + std::uint32_t len; + return get_number(input_format_t::msgpack, len) and get_string(input_format_t::msgpack, len, result); + } + + default: + { + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::msgpack, "expected length specification (0xA0-0xBF, 0xD9-0xDB); last byte: 0x" + last_token, "string"))); + } + } + } + + /*! + @param[in] len the length of the array + @return whether array creation completed + */ + bool get_msgpack_array(const std::size_t len) + { + if (JSON_UNLIKELY(not sax->start_array(len))) + { + return false; + } + + for (std::size_t i = 0; i < len; ++i) + { + if (JSON_UNLIKELY(not parse_msgpack_internal())) + { + return false; + } + } + + return sax->end_array(); + } + + /*! + @param[in] len the length of the object + @return whether object creation completed + */ + bool get_msgpack_object(const std::size_t len) + { + if (JSON_UNLIKELY(not sax->start_object(len))) + { + return false; + } + + string_t key; + for (std::size_t i = 0; i < len; ++i) + { + get(); + if (JSON_UNLIKELY(not get_msgpack_string(key) or not sax->key(key))) + { + return false; + } + + if (JSON_UNLIKELY(not parse_msgpack_internal())) + { + return false; + } + key.clear(); + } + + return sax->end_object(); + } + + //////////// + // UBJSON // + //////////// + + /*! + @param[in] get_char whether a new character should be retrieved from the + input (true, default) or whether the last read + character should be considered instead + + @return whether a valid UBJSON value was passed to the SAX parser + */ + bool parse_ubjson_internal(const bool get_char = true) + { + return get_ubjson_value(get_char ? 
get_ignore_noop() : current); + } + + /*! + @brief reads a UBJSON string + + This function is either called after reading the 'S' byte explicitly + indicating a string, or in case of an object key where the 'S' byte can be + left out. + + @param[out] result created string + @param[in] get_char whether a new character should be retrieved from the + input (true, default) or whether the last read + character should be considered instead + + @return whether string creation completed + */ + bool get_ubjson_string(string_t& result, const bool get_char = true) + { + if (get_char) + { + get(); // TODO(niels): may we ignore N here? + } + + if (JSON_UNLIKELY(not unexpect_eof(input_format_t::ubjson, "value"))) + { + return false; + } + + switch (current) + { + case 'U': + { + std::uint8_t len; + return get_number(input_format_t::ubjson, len) and get_string(input_format_t::ubjson, len, result); + } + + case 'i': + { + std::int8_t len; + return get_number(input_format_t::ubjson, len) and get_string(input_format_t::ubjson, len, result); + } + + case 'I': + { + std::int16_t len; + return get_number(input_format_t::ubjson, len) and get_string(input_format_t::ubjson, len, result); + } + + case 'l': + { + std::int32_t len; + return get_number(input_format_t::ubjson, len) and get_string(input_format_t::ubjson, len, result); + } + + case 'L': + { + std::int64_t len; + return get_number(input_format_t::ubjson, len) and get_string(input_format_t::ubjson, len, result); + } + + default: + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "expected length type specification (U, i, I, l, L); last byte: 0x" + last_token, "string"))); + } + } + + /*! + @param[out] result determined size + @return whether size determination completed + */ + bool get_ubjson_size_value(std::size_t& result) + { + switch (get_ignore_noop()) + { + case 'U': + { + std::uint8_t number; + if (JSON_UNLIKELY(not get_number(input_format_t::ubjson, number))) + { + return false; + } + result = static_cast(number); + return true; + } + + case 'i': + { + std::int8_t number; + if (JSON_UNLIKELY(not get_number(input_format_t::ubjson, number))) + { + return false; + } + result = static_cast(number); + return true; + } + + case 'I': + { + std::int16_t number; + if (JSON_UNLIKELY(not get_number(input_format_t::ubjson, number))) + { + return false; + } + result = static_cast(number); + return true; + } + + case 'l': + { + std::int32_t number; + if (JSON_UNLIKELY(not get_number(input_format_t::ubjson, number))) + { + return false; + } + result = static_cast(number); + return true; + } + + case 'L': + { + std::int64_t number; + if (JSON_UNLIKELY(not get_number(input_format_t::ubjson, number))) + { + return false; + } + result = static_cast(number); + return true; + } + + default: + { + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "expected length type specification (U, i, I, l, L) after '#'; last byte: 0x" + last_token, "size"))); + } + } + } + + /*! + @brief determine the type and size for a container + + In the optimized UBJSON format, a type and a size can be provided to allow + for a more compact representation. 
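+
+    A '$' marker announces a single element type for the whole container and
+    must be followed by '#' and a count; a bare '#' announces just a count.
+    As a sketch (byte values as in the UBJSON specification), the array
+    [1, 2, 3] can be written in the optimized form
+
+        '[' '$' 'i' '#' 'i' 0x03  0x01 0x02 0x03
+
+    for which this function yields the pair (3, 'i') and no per-element type
+    markers follow in the payload.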
+ + @param[out] result pair of the size and the type + + @return whether pair creation completed + */ + bool get_ubjson_size_type(std::pair& result) + { + result.first = string_t::npos; // size + result.second = 0; // type + + get_ignore_noop(); + + if (current == '$') + { + result.second = get(); // must not ignore 'N', because 'N' maybe the type + if (JSON_UNLIKELY(not unexpect_eof(input_format_t::ubjson, "type"))) + { + return false; + } + + get_ignore_noop(); + if (JSON_UNLIKELY(current != '#')) + { + if (JSON_UNLIKELY(not unexpect_eof(input_format_t::ubjson, "value"))) + { + return false; + } + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::ubjson, "expected '#' after type information; last byte: 0x" + last_token, "size"))); + } + + return get_ubjson_size_value(result.first); + } + + if (current == '#') + { + return get_ubjson_size_value(result.first); + } + + return true; + } + + /*! + @param prefix the previously read or set type prefix + @return whether value creation completed + */ + bool get_ubjson_value(const int prefix) + { + switch (prefix) + { + case std::char_traits::eof(): // EOF + return unexpect_eof(input_format_t::ubjson, "value"); + + case 'T': // true + return sax->boolean(true); + case 'F': // false + return sax->boolean(false); + + case 'Z': // null + return sax->null(); + + case 'U': + { + std::uint8_t number; + return get_number(input_format_t::ubjson, number) and sax->number_unsigned(number); + } + + case 'i': + { + std::int8_t number; + return get_number(input_format_t::ubjson, number) and sax->number_integer(number); + } + + case 'I': + { + std::int16_t number; + return get_number(input_format_t::ubjson, number) and sax->number_integer(number); + } + + case 'l': + { + std::int32_t number; + return get_number(input_format_t::ubjson, number) and sax->number_integer(number); + } + + case 'L': + { + std::int64_t number; + return get_number(input_format_t::ubjson, number) and sax->number_integer(number); + } + + case 'd': + { + float number; + return get_number(input_format_t::ubjson, number) and sax->number_float(static_cast(number), ""); + } + + case 'D': + { + double number; + return get_number(input_format_t::ubjson, number) and sax->number_float(static_cast(number), ""); + } + + case 'C': // char + { + get(); + if (JSON_UNLIKELY(not unexpect_eof(input_format_t::ubjson, "char"))) + { + return false; + } + if (JSON_UNLIKELY(current > 127)) + { + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "byte after 'C' must be in range 0x00..0x7F; last byte: 0x" + last_token, "char"))); + } + string_t s(1, static_cast(current)); + return sax->string(s); + } + + case 'S': // string + { + string_t s; + return get_ubjson_string(s) and sax->string(s); + } + + case '[': // array + return get_ubjson_array(); + + case '{': // object + return get_ubjson_object(); + + default: // anything else + { + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::ubjson, "invalid byte: 0x" + last_token, "value"))); + } + } + } + + /*! 
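+    @brief reads a UBJSON array and passes it to the SAX parser
+
+    A plain array such as '[' 'i' 0x01 'i' 0x02 ']' is read value by value
+    until the closing ']'; when get_ubjson_size_type() reports a count (and
+    optionally a type), exactly that many elements are read and no closing
+    ']' is expected.
+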
+ @return whether array creation completed + */ + bool get_ubjson_array() + { + std::pair size_and_type; + if (JSON_UNLIKELY(not get_ubjson_size_type(size_and_type))) + { + return false; + } + + if (size_and_type.first != string_t::npos) + { + if (JSON_UNLIKELY(not sax->start_array(size_and_type.first))) + { + return false; + } + + if (size_and_type.second != 0) + { + if (size_and_type.second != 'N') + { + for (std::size_t i = 0; i < size_and_type.first; ++i) + { + if (JSON_UNLIKELY(not get_ubjson_value(size_and_type.second))) + { + return false; + } + } + } + } + else + { + for (std::size_t i = 0; i < size_and_type.first; ++i) + { + if (JSON_UNLIKELY(not parse_ubjson_internal())) + { + return false; + } + } + } + } + else + { + if (JSON_UNLIKELY(not sax->start_array(std::size_t(-1)))) + { + return false; + } + + while (current != ']') + { + if (JSON_UNLIKELY(not parse_ubjson_internal(false))) + { + return false; + } + get_ignore_noop(); + } + } + + return sax->end_array(); + } + + /*! + @return whether object creation completed + */ + bool get_ubjson_object() + { + std::pair size_and_type; + if (JSON_UNLIKELY(not get_ubjson_size_type(size_and_type))) + { + return false; + } + + string_t key; + if (size_and_type.first != string_t::npos) + { + if (JSON_UNLIKELY(not sax->start_object(size_and_type.first))) + { + return false; + } + + if (size_and_type.second != 0) + { + for (std::size_t i = 0; i < size_and_type.first; ++i) + { + if (JSON_UNLIKELY(not get_ubjson_string(key) or not sax->key(key))) + { + return false; + } + if (JSON_UNLIKELY(not get_ubjson_value(size_and_type.second))) + { + return false; + } + key.clear(); + } + } + else + { + for (std::size_t i = 0; i < size_and_type.first; ++i) + { + if (JSON_UNLIKELY(not get_ubjson_string(key) or not sax->key(key))) + { + return false; + } + if (JSON_UNLIKELY(not parse_ubjson_internal())) + { + return false; + } + key.clear(); + } + } + } + else + { + if (JSON_UNLIKELY(not sax->start_object(std::size_t(-1)))) + { + return false; + } + + while (current != '}') + { + if (JSON_UNLIKELY(not get_ubjson_string(key, false) or not sax->key(key))) + { + return false; + } + if (JSON_UNLIKELY(not parse_ubjson_internal())) + { + return false; + } + get_ignore_noop(); + key.clear(); + } + } + + return sax->end_object(); + } + + /////////////////////// + // Utility functions // + /////////////////////// + + /*! + @brief get next character from the input + + This function provides the interface to the used input adapter. It does + not throw in case the input reached EOF, but returns a -'ve valued + `std::char_traits::eof()` in that case. + + @return character read from the input + */ + int get() + { + ++chars_read; + return current = ia->get_character(); + } + + /*! + @return character read from the input after ignoring all 'N' entries + */ + int get_ignore_noop() + { + do + { + get(); + } + while (current == 'N'); + + return current; + } + + /* + @brief read a number from the input + + @tparam NumberType the type of the number + @param[in] format the current format (for diagnostics) + @param[out] result number of type @a NumberType + + @return whether conversion completed + + @note This function needs to respect the system's endianess, because + bytes in CBOR, MessagePack, and UBJSON are stored in network order + (big endian) and therefore need reordering on little endian systems. 
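+          For example, a CBOR uint16 payload of 0x12 0x34 denotes the value
+          0x1234 (4660); on a little-endian host the loop below stores the
+          bytes reversed (0x34 0x12) before the memcpy, so the reassembled
+          integer still equals 0x1234.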
+ */ + template + bool get_number(const input_format_t format, NumberType& result) + { + // step 1: read input into array with system's byte order + std::array vec; + for (std::size_t i = 0; i < sizeof(NumberType); ++i) + { + get(); + if (JSON_UNLIKELY(not unexpect_eof(format, "number"))) + { + return false; + } + + // reverse byte order prior to conversion if necessary + if (is_little_endian != InputIsLittleEndian) + { + vec[sizeof(NumberType) - i - 1] = static_cast(current); + } + else + { + vec[i] = static_cast(current); // LCOV_EXCL_LINE + } + } + + // step 2: convert array into number of type T and return + std::memcpy(&result, vec.data(), sizeof(NumberType)); + return true; + } + + /*! + @brief create a string by reading characters from the input + + @tparam NumberType the type of the number + @param[in] format the current format (for diagnostics) + @param[in] len number of characters to read + @param[out] result string created by reading @a len bytes + + @return whether string creation completed + + @note We can not reserve @a len bytes for the result, because @a len + may be too large. Usually, @ref unexpect_eof() detects the end of + the input before we run out of string memory. + */ + template + bool get_string(const input_format_t format, + const NumberType len, + string_t& result) + { + bool success = true; + std::generate_n(std::back_inserter(result), len, [this, &success, &format]() + { + get(); + if (JSON_UNLIKELY(not unexpect_eof(format, "string"))) + { + success = false; + } + return static_cast(current); + }); + return success; + } + + /*! + @param[in] format the current format (for diagnostics) + @param[in] context further context information (for diagnostics) + @return whether the last read character is not EOF + */ + bool unexpect_eof(const input_format_t format, const char* context) const + { + if (JSON_UNLIKELY(current == std::char_traits::eof())) + { + return sax->parse_error(chars_read, "", + parse_error::create(110, chars_read, exception_message(format, "unexpected end of input", context))); + } + return true; + } + + /*! + @return a string representation of the last read byte + */ + std::string get_token_string() const + { + std::array cr{{}}; + (std::snprintf)(cr.data(), cr.size(), "%.2hhX", static_cast(current)); + return std::string{cr.data()}; + } + + /*! 
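+    @brief assemble the message text used in parse_error exceptions
+
+    The result has the shape "syntax error while parsing <format> <context>:
+    <detail>", for example "syntax error while parsing CBOR value: unexpected
+    end of input" when unexpect_eof() reports an end of file.
+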
+ @param[in] format the current format + @param[in] detail a detailed error message + @param[in] context further contect information + @return a message string to use in the parse_error exceptions + */ + std::string exception_message(const input_format_t format, + const std::string& detail, + const std::string& context) const + { + std::string error_msg = "syntax error while parsing "; + + switch (format) + { + case input_format_t::cbor: + error_msg += "CBOR"; + break; + + case input_format_t::msgpack: + error_msg += "MessagePack"; + break; + + case input_format_t::ubjson: + error_msg += "UBJSON"; + break; + + case input_format_t::bson: + error_msg += "BSON"; + break; + + default: // LCOV_EXCL_LINE + assert(false); // LCOV_EXCL_LINE + } + + return error_msg + " " + context + ": " + detail; + } + + private: + /// input adapter + input_adapter_t ia = nullptr; + + /// the current character + int current = std::char_traits::eof(); + + /// the number of characters read + std::size_t chars_read = 0; + + /// whether we can assume little endianess + const bool is_little_endian = little_endianess(); + + /// the SAX parser + json_sax_t* sax = nullptr; +}; +} // namespace detail +} // namespace nlohmann + +// #include + // #include +#include // array #include // localeconv #include // size_t -#include // strtof, strtod, strtold, strtoll, strtoull #include // snprintf +#include // strtof, strtod, strtold, strtoll, strtoull #include // initializer_list #include // char_traits, string +#include // move #include // vector -// #include - // #include // #include +// #include + namespace nlohmann { @@ -2715,22 +5651,22 @@ class lexer assert(current == 'u'); int codepoint = 0; - const auto factors = { 12, 8, 4, 0 }; + const auto factors = { 12u, 8u, 4u, 0u }; for (const auto factor : factors) { get(); if (current >= '0' and current <= '9') { - codepoint += ((current - 0x30) << factor); + codepoint += static_cast((static_cast(current) - 0x30u) << factor); } else if (current >= 'A' and current <= 'F') { - codepoint += ((current - 0x37) << factor); + codepoint += static_cast((static_cast(current) - 0x37u) << factor); } else if (current >= 'a' and current <= 'f') { - codepoint += ((current - 0x57) << factor); + codepoint += static_cast((static_cast(current) - 0x57u) << factor); } else { @@ -2888,15 +5824,15 @@ class lexer if (JSON_LIKELY(0xDC00 <= codepoint2 and codepoint2 <= 0xDFFF)) { // overwrite codepoint - codepoint = - // high surrogate occupies the most significant 22 bits - (codepoint1 << 10) - // low surrogate occupies the least significant 15 bits - + codepoint2 - // there is still the 0xD800, 0xDC00 and 0x10000 noise - // in the result so we have to subtract with: - // (0xD800 << 10) + DC00 - 0x10000 = 0x35FDC00 - - 0x35FDC00; + codepoint = static_cast( + // high surrogate occupies the most significant 22 bits + (static_cast(codepoint1) << 10u) + // low surrogate occupies the least significant 15 bits + + static_cast(codepoint2) + // there is still the 0xD800, 0xDC00 and 0x10000 noise + // in the result so we have to subtract with: + // (0xD800 << 10) + DC00 - 0x10000 = 0x35FDC00 + - 0x35FDC00u); } else { @@ -2931,23 +5867,23 @@ class lexer else if (codepoint <= 0x7FF) { // 2-byte characters: 110xxxxx 10xxxxxx - add(0xC0 | (codepoint >> 6)); - add(0x80 | (codepoint & 0x3F)); + add(static_cast(0xC0u | (static_cast(codepoint) >> 6u))); + add(static_cast(0x80u | (static_cast(codepoint) & 0x3Fu))); } else if (codepoint <= 0xFFFF) { // 3-byte characters: 1110xxxx 10xxxxxx 10xxxxxx - add(0xE0 | (codepoint 
>> 12)); - add(0x80 | ((codepoint >> 6) & 0x3F)); - add(0x80 | (codepoint & 0x3F)); + add(static_cast(0xE0u | (static_cast(codepoint) >> 12u))); + add(static_cast(0x80u | ((static_cast(codepoint) >> 6u) & 0x3Fu))); + add(static_cast(0x80u | (static_cast(codepoint) & 0x3Fu))); } else { // 4-byte characters: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - add(0xF0 | (codepoint >> 18)); - add(0x80 | ((codepoint >> 12) & 0x3F)); - add(0x80 | ((codepoint >> 6) & 0x3F)); - add(0x80 | (codepoint & 0x3F)); + add(static_cast(0xF0u | (static_cast(codepoint) >> 18u))); + add(static_cast(0x80u | ((static_cast(codepoint) >> 12u) & 0x3Fu))); + add(static_cast(0x80u | ((static_cast(codepoint) >> 6u) & 0x3Fu))); + add(static_cast(0x80u | (static_cast(codepoint) & 0x3Fu))); } break; @@ -3473,13 +6409,9 @@ class lexer goto scan_number_any1; } - // LCOV_EXCL_START - default: - { - // all other characters are rejected outside scan_number() - assert(false); - } - // LCOV_EXCL_STOP + // all other characters are rejected outside scan_number() + default: // LCOV_EXCL_LINE + assert(false); // LCOV_EXCL_LINE } scan_number_minus: @@ -3829,7 +6761,7 @@ scan_number_done: if (current == '\n') { ++position.lines_read; - ++position.chars_read_current_line = 0; + position.chars_read_current_line = 0; } return current; @@ -3864,7 +6796,7 @@ scan_number_done: if (JSON_LIKELY(current != std::char_traits::eof())) { - assert(token_string.size() != 0); + assert(not token_string.empty()); token_string.pop_back(); } } @@ -3926,9 +6858,9 @@ scan_number_done: if ('\x00' <= c and c <= '\x1F') { // escape control characters - char cs[9]; - (std::snprintf)(cs, 9, "", static_cast(c)); - result += cs; + std::array cs{{}}; + (std::snprintf)(cs.data(), cs.size(), "", static_cast(c)); + result += cs.data(); } else { @@ -4050,7 +6982,7 @@ scan_number_done: bool next_unget = false; /// the start position of the current token - position_t position; + position_t position {}; /// raw input token string (for error messages) std::vector token_string {}; @@ -4081,865 +7013,20 @@ scan_number_done: #include // function #include // string #include // move +#include // vector // #include -// #include - -// #include - - -#include // size_t -#include // declval - -// #include - -// #include - - -namespace nlohmann -{ -namespace detail -{ -template -using null_function_t = decltype(std::declval().null()); - -template -using boolean_function_t = - decltype(std::declval().boolean(std::declval())); - -template -using number_integer_function_t = - decltype(std::declval().number_integer(std::declval())); - -template -using number_unsigned_function_t = - decltype(std::declval().number_unsigned(std::declval())); - -template -using number_float_function_t = decltype(std::declval().number_float( - std::declval(), std::declval())); - -template -using string_function_t = - decltype(std::declval().string(std::declval())); - -template -using start_object_function_t = - decltype(std::declval().start_object(std::declval())); - -template -using key_function_t = - decltype(std::declval().key(std::declval())); - -template -using end_object_function_t = decltype(std::declval().end_object()); - -template -using start_array_function_t = - decltype(std::declval().start_array(std::declval())); - -template -using end_array_function_t = decltype(std::declval().end_array()); - -template -using parse_error_function_t = decltype(std::declval().parse_error( - std::declval(), std::declval(), - std::declval())); - -template -struct is_sax -{ - private: - static_assert(is_basic_json::value, - 
"BasicJsonType must be of type basic_json<...>"); - - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using exception_t = typename BasicJsonType::exception; - - public: - static constexpr bool value = - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value; -}; - -template -struct is_sax_static_asserts -{ - private: - static_assert(is_basic_json::value, - "BasicJsonType must be of type basic_json<...>"); - - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using exception_t = typename BasicJsonType::exception; - - public: - static_assert(is_detected_exact::value, - "Missing/invalid function: bool null()"); - static_assert(is_detected_exact::value, - "Missing/invalid function: bool boolean(bool)"); - static_assert(is_detected_exact::value, - "Missing/invalid function: bool boolean(bool)"); - static_assert( - is_detected_exact::value, - "Missing/invalid function: bool number_integer(number_integer_t)"); - static_assert( - is_detected_exact::value, - "Missing/invalid function: bool number_unsigned(number_unsigned_t)"); - static_assert(is_detected_exact::value, - "Missing/invalid function: bool number_float(number_float_t, const string_t&)"); - static_assert( - is_detected_exact::value, - "Missing/invalid function: bool string(string_t&)"); - static_assert(is_detected_exact::value, - "Missing/invalid function: bool start_object(std::size_t)"); - static_assert(is_detected_exact::value, - "Missing/invalid function: bool key(string_t&)"); - static_assert(is_detected_exact::value, - "Missing/invalid function: bool end_object()"); - static_assert(is_detected_exact::value, - "Missing/invalid function: bool start_array(std::size_t)"); - static_assert(is_detected_exact::value, - "Missing/invalid function: bool end_array()"); - static_assert( - is_detected_exact::value, - "Missing/invalid function: bool parse_error(std::size_t, const " - "std::string&, const exception&)"); -}; -} // namespace detail -} // namespace nlohmann - // #include // #include - -#include -#include -#include - -// #include - -// #include - - -namespace nlohmann -{ - -/*! -@brief SAX interface - -This class describes the SAX interface used by @ref nlohmann::json::sax_parse. -Each function is called in different situations while the input is parsed. The -boolean return value informs the parser whether to continue processing the -input. -*/ -template -struct json_sax -{ - /// type for (signed) integers - using number_integer_t = typename BasicJsonType::number_integer_t; - /// type for unsigned integers - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - /// type for floating-point numbers - using number_float_t = typename BasicJsonType::number_float_t; - /// type for strings - using string_t = typename BasicJsonType::string_t; - - /*! 
- @brief a null value was read - @return whether parsing should proceed - */ - virtual bool null() = 0; - - /*! - @brief a boolean value was read - @param[in] val boolean value - @return whether parsing should proceed - */ - virtual bool boolean(bool val) = 0; - - /*! - @brief an integer number was read - @param[in] val integer value - @return whether parsing should proceed - */ - virtual bool number_integer(number_integer_t val) = 0; - - /*! - @brief an unsigned integer number was read - @param[in] val unsigned integer value - @return whether parsing should proceed - */ - virtual bool number_unsigned(number_unsigned_t val) = 0; - - /*! - @brief an floating-point number was read - @param[in] val floating-point value - @param[in] s raw token value - @return whether parsing should proceed - */ - virtual bool number_float(number_float_t val, const string_t& s) = 0; - - /*! - @brief a string was read - @param[in] val string value - @return whether parsing should proceed - @note It is safe to move the passed string. - */ - virtual bool string(string_t& val) = 0; - - /*! - @brief the beginning of an object was read - @param[in] elements number of object elements or -1 if unknown - @return whether parsing should proceed - @note binary formats may report the number of elements - */ - virtual bool start_object(std::size_t elements) = 0; - - /*! - @brief an object key was read - @param[in] val object key - @return whether parsing should proceed - @note It is safe to move the passed string. - */ - virtual bool key(string_t& val) = 0; - - /*! - @brief the end of an object was read - @return whether parsing should proceed - */ - virtual bool end_object() = 0; - - /*! - @brief the beginning of an array was read - @param[in] elements number of array elements or -1 if unknown - @return whether parsing should proceed - @note binary formats may report the number of elements - */ - virtual bool start_array(std::size_t elements) = 0; - - /*! - @brief the end of an array was read - @return whether parsing should proceed - */ - virtual bool end_array() = 0; - - /*! - @brief a parse error occurred - @param[in] position the position in the input where the error occurs - @param[in] last_token the last read token - @param[in] ex an exception object describing the error - @return whether parsing should proceed (must return false) - */ - virtual bool parse_error(std::size_t position, - const std::string& last_token, - const detail::exception& ex) = 0; - - virtual ~json_sax() = default; -}; - - -namespace detail -{ -/*! -@brief SAX implementation to create a JSON value from SAX events - -This class implements the @ref json_sax interface and processes the SAX events -to create a JSON value which makes it basically a DOM parser. The structure or -hierarchy of the JSON value is managed by the stack `ref_stack` which contains -a pointer to the respective array or object for each recursion depth. - -After successful parsing, the value that is passed by reference to the -constructor contains the parsed value. - -@tparam BasicJsonType the JSON type -*/ -template -class json_sax_dom_parser -{ - public: - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - - /*! 
- @param[in, out] r reference to a JSON value that is manipulated while - parsing - @param[in] allow_exceptions_ whether parse errors yield exceptions - */ - explicit json_sax_dom_parser(BasicJsonType& r, const bool allow_exceptions_ = true) - : root(r), allow_exceptions(allow_exceptions_) - {} - - bool null() - { - handle_value(nullptr); - return true; - } - - bool boolean(bool val) - { - handle_value(val); - return true; - } - - bool number_integer(number_integer_t val) - { - handle_value(val); - return true; - } - - bool number_unsigned(number_unsigned_t val) - { - handle_value(val); - return true; - } - - bool number_float(number_float_t val, const string_t& /*unused*/) - { - handle_value(val); - return true; - } - - bool string(string_t& val) - { - handle_value(val); - return true; - } - - bool start_object(std::size_t len) - { - ref_stack.push_back(handle_value(BasicJsonType::value_t::object)); - - if (JSON_UNLIKELY(len != std::size_t(-1) and len > ref_stack.back()->max_size())) - { - JSON_THROW(out_of_range::create(408, - "excessive object size: " + std::to_string(len))); - } - - return true; - } - - bool key(string_t& val) - { - // add null at given key and store the reference for later - object_element = &(ref_stack.back()->m_value.object->operator[](val)); - return true; - } - - bool end_object() - { - ref_stack.pop_back(); - return true; - } - - bool start_array(std::size_t len) - { - ref_stack.push_back(handle_value(BasicJsonType::value_t::array)); - - if (JSON_UNLIKELY(len != std::size_t(-1) and len > ref_stack.back()->max_size())) - { - JSON_THROW(out_of_range::create(408, - "excessive array size: " + std::to_string(len))); - } - - return true; - } - - bool end_array() - { - ref_stack.pop_back(); - return true; - } - - bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, - const detail::exception& ex) - { - errored = true; - if (allow_exceptions) - { - // determine the proper exception type from the id - switch ((ex.id / 100) % 100) - { - case 1: - JSON_THROW(*reinterpret_cast(&ex)); - case 4: - JSON_THROW(*reinterpret_cast(&ex)); - // LCOV_EXCL_START - case 2: - JSON_THROW(*reinterpret_cast(&ex)); - case 3: - JSON_THROW(*reinterpret_cast(&ex)); - case 5: - JSON_THROW(*reinterpret_cast(&ex)); - default: - assert(false); - // LCOV_EXCL_STOP - } - } - return false; - } - - constexpr bool is_errored() const - { - return errored; - } - - private: - /*! - @invariant If the ref stack is empty, then the passed value will be the new - root. 
- @invariant If the ref stack contains a value, then it is an array or an - object to which we can add elements - */ - template - BasicJsonType* handle_value(Value&& v) - { - if (ref_stack.empty()) - { - root = BasicJsonType(std::forward(v)); - return &root; - } - - assert(ref_stack.back()->is_array() or ref_stack.back()->is_object()); - - if (ref_stack.back()->is_array()) - { - ref_stack.back()->m_value.array->emplace_back(std::forward(v)); - return &(ref_stack.back()->m_value.array->back()); - } - else - { - assert(object_element); - *object_element = BasicJsonType(std::forward(v)); - return object_element; - } - } - - /// the parsed JSON value - BasicJsonType& root; - /// stack to model hierarchy of values - std::vector ref_stack; - /// helper to hold the reference for the next object element - BasicJsonType* object_element = nullptr; - /// whether a syntax error occurred - bool errored = false; - /// whether to throw exceptions in case of errors - const bool allow_exceptions = true; -}; - -template -class json_sax_dom_callback_parser -{ - public: - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using parser_callback_t = typename BasicJsonType::parser_callback_t; - using parse_event_t = typename BasicJsonType::parse_event_t; - - json_sax_dom_callback_parser(BasicJsonType& r, - const parser_callback_t cb, - const bool allow_exceptions_ = true) - : root(r), callback(cb), allow_exceptions(allow_exceptions_) - { - keep_stack.push_back(true); - } - - bool null() - { - handle_value(nullptr); - return true; - } - - bool boolean(bool val) - { - handle_value(val); - return true; - } - - bool number_integer(number_integer_t val) - { - handle_value(val); - return true; - } - - bool number_unsigned(number_unsigned_t val) - { - handle_value(val); - return true; - } - - bool number_float(number_float_t val, const string_t& /*unused*/) - { - handle_value(val); - return true; - } - - bool string(string_t& val) - { - handle_value(val); - return true; - } - - bool start_object(std::size_t len) - { - // check callback for object start - const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::object_start, discarded); - keep_stack.push_back(keep); - - auto val = handle_value(BasicJsonType::value_t::object, true); - ref_stack.push_back(val.second); - - // check object limit - if (ref_stack.back()) - { - if (JSON_UNLIKELY(len != std::size_t(-1) and len > ref_stack.back()->max_size())) - { - JSON_THROW(out_of_range::create(408, - "excessive object size: " + std::to_string(len))); - } - } - - return true; - } - - bool key(string_t& val) - { - BasicJsonType k = BasicJsonType(val); - - // check callback for key - const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::key, k); - key_keep_stack.push_back(keep); - - // add discarded value at given key and store the reference for later - if (keep and ref_stack.back()) - { - object_element = &(ref_stack.back()->m_value.object->operator[](val) = discarded); - } - - return true; - } - - bool end_object() - { - if (ref_stack.back()) - { - if (not callback(static_cast(ref_stack.size()) - 1, parse_event_t::object_end, *ref_stack.back())) - { - // discard object - *ref_stack.back() = discarded; - } - } - - assert(not ref_stack.empty()); - assert(not keep_stack.empty()); - ref_stack.pop_back(); - keep_stack.pop_back(); - - if (not 
ref_stack.empty() and ref_stack.back()) - { - // remove discarded value - if (ref_stack.back()->is_object()) - { - for (auto it = ref_stack.back()->begin(); it != ref_stack.back()->end(); ++it) - { - if (it->is_discarded()) - { - ref_stack.back()->erase(it); - break; - } - } - } - } - - return true; - } - - bool start_array(std::size_t len) - { - const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::array_start, discarded); - keep_stack.push_back(keep); - - auto val = handle_value(BasicJsonType::value_t::array, true); - ref_stack.push_back(val.second); - - // check array limit - if (ref_stack.back()) - { - if (JSON_UNLIKELY(len != std::size_t(-1) and len > ref_stack.back()->max_size())) - { - JSON_THROW(out_of_range::create(408, - "excessive array size: " + std::to_string(len))); - } - } - - return true; - } - - bool end_array() - { - bool keep = true; - - if (ref_stack.back()) - { - keep = callback(static_cast(ref_stack.size()) - 1, parse_event_t::array_end, *ref_stack.back()); - if (not keep) - { - // discard array - *ref_stack.back() = discarded; - } - } - - assert(not ref_stack.empty()); - assert(not keep_stack.empty()); - ref_stack.pop_back(); - keep_stack.pop_back(); - - // remove discarded value - if (not keep and not ref_stack.empty()) - { - if (ref_stack.back()->is_array()) - { - ref_stack.back()->m_value.array->pop_back(); - } - } - - return true; - } - - bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, - const detail::exception& ex) - { - errored = true; - if (allow_exceptions) - { - // determine the proper exception type from the id - switch ((ex.id / 100) % 100) - { - case 1: - JSON_THROW(*reinterpret_cast(&ex)); - case 4: - JSON_THROW(*reinterpret_cast(&ex)); - // LCOV_EXCL_START - case 2: - JSON_THROW(*reinterpret_cast(&ex)); - case 3: - JSON_THROW(*reinterpret_cast(&ex)); - case 5: - JSON_THROW(*reinterpret_cast(&ex)); - default: - assert(false); - // LCOV_EXCL_STOP - } - } - return false; - } - - constexpr bool is_errored() const - { - return errored; - } - - private: - /*! - @param[in] v value to add to the JSON value we build during parsing - @param[in] skip_callback whether we should skip calling the callback - function; this is required after start_array() and - start_object() SAX events, because otherwise we would call the - callback function with an empty array or object, respectively. - - @invariant If the ref stack is empty, then the passed value will be the new - root. 
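    For context, this class backs the callback overload of parse(). A small
    sketch against the public API (the callback body and the input string are
    illustrative, not taken from the patch) that drops every object member
    keyed "secret":

        nlohmann::json::parser_callback_t cb =
            [](int /*depth*/, nlohmann::json::parse_event_t ev, nlohmann::json& parsed)
        {
            return not (ev == nlohmann::json::parse_event_t::key and parsed == "secret");
        };
        auto j = nlohmann::json::parse(R"({"user":"u","secret":"s"})", cb);
        // j == {"user":"u"}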
- @invariant If the ref stack contains a value, then it is an array or an - object to which we can add elements - - @return pair of boolean (whether value should be kept) and pointer (to the - passed value in the ref_stack hierarchy; nullptr if not kept) - */ - template - std::pair handle_value(Value&& v, const bool skip_callback = false) - { - assert(not keep_stack.empty()); - - // do not handle this value if we know it would be added to a discarded - // container - if (not keep_stack.back()) - { - return {false, nullptr}; - } - - // create value - auto value = BasicJsonType(std::forward(v)); - - // check callback - const bool keep = skip_callback or callback(static_cast(ref_stack.size()), parse_event_t::value, value); - - // do not handle this value if we just learnt it shall be discarded - if (not keep) - { - return {false, nullptr}; - } - - if (ref_stack.empty()) - { - root = std::move(value); - return {true, &root}; - } - - // skip this value if we already decided to skip the parent - // (https://github.com/nlohmann/json/issues/971#issuecomment-413678360) - if (not ref_stack.back()) - { - return {false, nullptr}; - } - - // we now only expect arrays and objects - assert(ref_stack.back()->is_array() or ref_stack.back()->is_object()); - - if (ref_stack.back()->is_array()) - { - ref_stack.back()->m_value.array->push_back(std::move(value)); - return {true, &(ref_stack.back()->m_value.array->back())}; - } - else - { - // check if we should store an element for the current key - assert(not key_keep_stack.empty()); - const bool store_element = key_keep_stack.back(); - key_keep_stack.pop_back(); - - if (not store_element) - { - return {false, nullptr}; - } - - assert(object_element); - *object_element = std::move(value); - return {true, object_element}; - } - } - - /// the parsed JSON value - BasicJsonType& root; - /// stack to model hierarchy of values - std::vector ref_stack; - /// stack to manage which values to keep - std::vector keep_stack; - /// stack to manage which object keys to keep - std::vector key_keep_stack; - /// helper to hold the reference for the next object element - BasicJsonType* object_element = nullptr; - /// whether a syntax error occurred - bool errored = false; - /// callback function - const parser_callback_t callback = nullptr; - /// whether to throw exceptions in case of errors - const bool allow_exceptions = true; - /// a discarded value for the callback - BasicJsonType discarded = BasicJsonType::value_t::discarded; -}; - -template -class json_sax_acceptor -{ - public: - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - - bool null() - { - return true; - } - - bool boolean(bool /*unused*/) - { - return true; - } - - bool number_integer(number_integer_t /*unused*/) - { - return true; - } - - bool number_unsigned(number_unsigned_t /*unused*/) - { - return true; - } - - bool number_float(number_float_t /*unused*/, const string_t& /*unused*/) - { - return true; - } - - bool string(string_t& /*unused*/) - { - return true; - } - - bool start_object(std::size_t /*unused*/ = std::size_t(-1)) - { - return true; - } - - bool key(string_t& /*unused*/) - { - return true; - } - - bool end_object() - { - return true; - } - - bool start_array(std::size_t /*unused*/ = std::size_t(-1)) - { - return true; - } - - bool end_array() - { - return true; - } - - bool 
parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const detail::exception& /*unused*/) - { - return false; - } -}; -} // namespace detail - -} // namespace nlohmann - // #include +// #include + +// #include + // #include @@ -5189,14 +7276,13 @@ class parser m_lexer.get_token_string(), out_of_range::create(406, "number overflow parsing '" + m_lexer.get_token_string() + "'")); } - else + + if (JSON_UNLIKELY(not sax->number_float(res, m_lexer.get_string()))) { - if (JSON_UNLIKELY(not sax->number_float(res, m_lexer.get_string()))) - { - return false; - } - break; + return false; } + + break; } case token_type::literal_false: @@ -5282,103 +7368,95 @@ class parser // empty stack: we reached the end of the hierarchy: done return true; } - else + + if (states.back()) // array { - if (states.back()) // array + // comma -> next value + if (get_token() == token_type::value_separator) { - // comma -> next value - if (get_token() == token_type::value_separator) + // parse a new value + get_token(); + continue; + } + + // closing ] + if (JSON_LIKELY(last_token == token_type::end_array)) + { + if (JSON_UNLIKELY(not sax->end_array())) { - // parse a new value - get_token(); - continue; + return false; } - // closing ] - if (JSON_LIKELY(last_token == token_type::end_array)) - { - if (JSON_UNLIKELY(not sax->end_array())) - { - return false; - } + // We are done with this array. Before we can parse a + // new value, we need to evaluate the new state first. + // By setting skip_to_state_evaluation to false, we + // are effectively jumping to the beginning of this if. + assert(not states.empty()); + states.pop_back(); + skip_to_state_evaluation = true; + continue; + } - // We are done with this array. Before we can parse a - // new value, we need to evaluate the new state first. - // By setting skip_to_state_evaluation to false, we - // are effectively jumping to the beginning of this if. 
- assert(not states.empty()); - states.pop_back(); - skip_to_state_evaluation = true; - continue; - } - else + return sax->parse_error(m_lexer.get_position(), + m_lexer.get_token_string(), + parse_error::create(101, m_lexer.get_position(), + exception_message(token_type::end_array, "array"))); + } + else // object + { + // comma -> next value + if (get_token() == token_type::value_separator) + { + // parse key + if (JSON_UNLIKELY(get_token() != token_type::value_string)) { return sax->parse_error(m_lexer.get_position(), m_lexer.get_token_string(), parse_error::create(101, m_lexer.get_position(), - exception_message(token_type::end_array, "array"))); + exception_message(token_type::value_string, "object key"))); } - } - else // object - { - // comma -> next value - if (get_token() == token_type::value_separator) + + if (JSON_UNLIKELY(not sax->key(m_lexer.get_string()))) { - // parse key - if (JSON_UNLIKELY(get_token() != token_type::value_string)) - { - return sax->parse_error(m_lexer.get_position(), - m_lexer.get_token_string(), - parse_error::create(101, m_lexer.get_position(), - exception_message(token_type::value_string, "object key"))); - } - else - { - if (JSON_UNLIKELY(not sax->key(m_lexer.get_string()))) - { - return false; - } - } - - // parse separator (:) - if (JSON_UNLIKELY(get_token() != token_type::name_separator)) - { - return sax->parse_error(m_lexer.get_position(), - m_lexer.get_token_string(), - parse_error::create(101, m_lexer.get_position(), - exception_message(token_type::name_separator, "object separator"))); - } - - // parse values - get_token(); - continue; + return false; } - // closing } - if (JSON_LIKELY(last_token == token_type::end_object)) - { - if (JSON_UNLIKELY(not sax->end_object())) - { - return false; - } - - // We are done with this object. Before we can parse a - // new value, we need to evaluate the new state first. - // By setting skip_to_state_evaluation to false, we - // are effectively jumping to the beginning of this if. - assert(not states.empty()); - states.pop_back(); - skip_to_state_evaluation = true; - continue; - } - else + // parse separator (:) + if (JSON_UNLIKELY(get_token() != token_type::name_separator)) { return sax->parse_error(m_lexer.get_position(), m_lexer.get_token_string(), parse_error::create(101, m_lexer.get_position(), - exception_message(token_type::end_object, "object"))); + exception_message(token_type::name_separator, "object separator"))); } + + // parse values + get_token(); + continue; } + + // closing } + if (JSON_LIKELY(last_token == token_type::end_object)) + { + if (JSON_UNLIKELY(not sax->end_object())) + { + return false; + } + + // We are done with this object. Before we can parse a + // new value, we need to evaluate the new state first. + // By setting skip_to_state_evaluation to false, we + // are effectively jumping to the beginning of this if. 
+ assert(not states.empty()); + states.pop_back(); + skip_to_state_evaluation = true; + continue; + } + + return sax->parse_error(m_lexer.get_position(), + m_lexer.get_token_string(), + parse_error::create(101, m_lexer.get_position(), + exception_message(token_type::end_object, "object"))); } } } @@ -5386,7 +7464,7 @@ class parser /// get next token from lexer token_type get_token() { - return (last_token = m_lexer.scan()); + return last_token = m_lexer.scan(); } std::string exception_message(const token_type expected, const std::string& context) @@ -5431,6 +7509,9 @@ class parser } // namespace detail } // namespace nlohmann +// #include + + // #include @@ -5553,11 +7634,6 @@ class primitive_iterator_t } // namespace detail } // namespace nlohmann -// #include - - -// #include - namespace nlohmann { @@ -5598,6 +7674,8 @@ template struct internal_iterator // #include +// #include + // #include @@ -6196,10 +8274,11 @@ class iter_impl /// associated JSON instance pointer m_object = nullptr; /// the actual iterator of the associated instance - internal_iterator::type> m_it; + internal_iterator::type> m_it {}; }; } // namespace detail } // namespace nlohmann + // #include // #include @@ -6323,5390 +8402,22 @@ class json_reverse_iterator : public std::reverse_iterator } // namespace detail } // namespace nlohmann -// #include - - -#include // copy -#include // size_t -#include // streamsize -#include // back_inserter -#include // shared_ptr, make_shared -#include // basic_ostream -#include // basic_string -#include // vector - -namespace nlohmann -{ -namespace detail -{ -/// abstract output adapter interface -template struct output_adapter_protocol -{ - virtual void write_character(CharType c) = 0; - virtual void write_characters(const CharType* s, std::size_t length) = 0; - virtual ~output_adapter_protocol() = default; -}; - -/// a type to simplify interfaces -template -using output_adapter_t = std::shared_ptr>; - -/// output adapter for byte vectors -template -class output_vector_adapter : public output_adapter_protocol -{ - public: - explicit output_vector_adapter(std::vector& vec) noexcept - : v(vec) - {} - - void write_character(CharType c) override - { - v.push_back(c); - } - - void write_characters(const CharType* s, std::size_t length) override - { - std::copy(s, s + length, std::back_inserter(v)); - } - - private: - std::vector& v; -}; - -/// output adapter for output streams -template -class output_stream_adapter : public output_adapter_protocol -{ - public: - explicit output_stream_adapter(std::basic_ostream& s) noexcept - : stream(s) - {} - - void write_character(CharType c) override - { - stream.put(c); - } - - void write_characters(const CharType* s, std::size_t length) override - { - stream.write(s, static_cast(length)); - } - - private: - std::basic_ostream& stream; -}; - -/// output adapter for basic_string -template> -class output_string_adapter : public output_adapter_protocol -{ - public: - explicit output_string_adapter(StringType& s) noexcept - : str(s) - {} - - void write_character(CharType c) override - { - str.push_back(c); - } - - void write_characters(const CharType* s, std::size_t length) override - { - str.append(s, length); - } - - private: - StringType& str; -}; - -template> -class output_adapter -{ - public: - output_adapter(std::vector& vec) - : oa(std::make_shared>(vec)) {} - - output_adapter(std::basic_ostream& s) - : oa(std::make_shared>(s)) {} - - output_adapter(StringType& s) - : oa(std::make_shared>(s)) {} - - operator output_adapter_t() - { - return oa; - 
} - - private: - output_adapter_t oa = nullptr; -}; -} // namespace detail -} // namespace nlohmann - -// #include - - -#include // generate_n -#include // array -#include // assert -#include // ldexp -#include // size_t -#include // uint8_t, uint16_t, uint32_t, uint64_t -#include // snprintf -#include // memcpy -#include // back_inserter -#include // numeric_limits -#include // char_traits, string -#include // make_pair, move - -// #include - -// #include - -// #include - -// #include - -// #include - -// #include - - -namespace nlohmann -{ -namespace detail -{ -/////////////////// -// binary reader // -/////////////////// - -/*! -@brief deserialization of CBOR, MessagePack, and UBJSON values -*/ -template> -class binary_reader -{ - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using json_sax_t = SAX; - - public: - /*! - @brief create a binary reader - - @param[in] adapter input adapter to read from - */ - explicit binary_reader(input_adapter_t adapter) : ia(std::move(adapter)) - { - (void)detail::is_sax_static_asserts {}; - assert(ia); - } - - /*! - @param[in] format the binary format to parse - @param[in] sax_ a SAX event processor - @param[in] strict whether to expect the input to be consumed completed - - @return - */ - bool sax_parse(const input_format_t format, - json_sax_t* sax_, - const bool strict = true) - { - sax = sax_; - bool result = false; - - switch (format) - { - case input_format_t::bson: - result = parse_bson_internal(); - break; - - case input_format_t::cbor: - result = parse_cbor_internal(); - break; - - case input_format_t::msgpack: - result = parse_msgpack_internal(); - break; - - case input_format_t::ubjson: - result = parse_ubjson_internal(); - break; - - // LCOV_EXCL_START - default: - assert(false); - // LCOV_EXCL_STOP - } - - // strict mode: next byte must be EOF - if (result and strict) - { - if (format == input_format_t::ubjson) - { - get_ignore_noop(); - } - else - { - get(); - } - - if (JSON_UNLIKELY(current != std::char_traits::eof())) - { - return sax->parse_error(chars_read, get_token_string(), - parse_error::create(110, chars_read, exception_message(format, "expected end of input; last byte: 0x" + get_token_string(), "value"))); - } - } - - return result; - } - - /*! - @brief determine system byte order - - @return true if and only if system's byte order is little endian - - @note from http://stackoverflow.com/a/1001328/266378 - */ - static constexpr bool little_endianess(int num = 1) noexcept - { - return (*reinterpret_cast(&num) == 1); - } - - private: - ////////// - // BSON // - ////////// - - /*! - @brief Reads in a BSON-object and passes it to the SAX-parser. - @return whether a valid BSON-value was passed to the SAX parser - */ - bool parse_bson_internal() - { - std::int32_t document_size; - get_number(input_format_t::bson, document_size); - - if (JSON_UNLIKELY(not sax->start_object(std::size_t(-1)))) - { - return false; - } - - if (JSON_UNLIKELY(not parse_bson_element_list(/*is_array*/false))) - { - return false; - } - - return sax->end_object(); - } - - /*! - @brief Parses a C-style string from the BSON input. - @param[in, out] result A reference to the string variable where the read - string is to be stored. 
- @return `true` if the \x00-byte indicating the end of the string was - encountered before the EOF; false` indicates an unexpected EOF. - */ - bool get_bson_cstr(string_t& result) - { - auto out = std::back_inserter(result); - while (true) - { - get(); - if (JSON_UNLIKELY(not unexpect_eof(input_format_t::bson, "cstring"))) - { - return false; - } - if (current == 0x00) - { - return true; - } - *out++ = static_cast(current); - } - - return true; - } - - /*! - @brief Parses a zero-terminated string of length @a len from the BSON - input. - @param[in] len The length (including the zero-byte at the end) of the - string to be read. - @param[in, out] result A reference to the string variable where the read - string is to be stored. - @tparam NumberType The type of the length @a len - @pre len >= 1 - @return `true` if the string was successfully parsed - */ - template - bool get_bson_string(const NumberType len, string_t& result) - { - if (JSON_UNLIKELY(len < 1)) - { - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::bson, "string length must be at least 1, is " + std::to_string(len), "string"))); - } - - return get_string(input_format_t::bson, len - static_cast(1), result) and get() != std::char_traits::eof(); - } - - /*! - @brief Read a BSON document element of the given @a element_type. - @param[in] element_type The BSON element type, c.f. http://bsonspec.org/spec.html - @param[in] element_type_parse_position The position in the input stream, - where the `element_type` was read. - @warning Not all BSON element types are supported yet. An unsupported - @a element_type will give rise to a parse_error.114: - Unsupported BSON record type 0x... - @return whether a valid BSON-object/array was passed to the SAX parser - */ - bool parse_bson_element_internal(const int element_type, - const std::size_t element_type_parse_position) - { - switch (element_type) - { - case 0x01: // double - { - double number; - return get_number(input_format_t::bson, number) and sax->number_float(static_cast(number), ""); - } - - case 0x02: // string - { - std::int32_t len; - string_t value; - return get_number(input_format_t::bson, len) and get_bson_string(len, value) and sax->string(value); - } - - case 0x03: // object - { - return parse_bson_internal(); - } - - case 0x04: // array - { - return parse_bson_array(); - } - - case 0x08: // boolean - { - return sax->boolean(get() != 0); - } - - case 0x0A: // null - { - return sax->null(); - } - - case 0x10: // int32 - { - std::int32_t value; - return get_number(input_format_t::bson, value) and sax->number_integer(value); - } - - case 0x12: // int64 - { - std::int64_t value; - return get_number(input_format_t::bson, value) and sax->number_integer(value); - } - - default: // anything else not supported (yet) - { - char cr[3]; - (std::snprintf)(cr, sizeof(cr), "%.2hhX", static_cast(element_type)); - return sax->parse_error(element_type_parse_position, std::string(cr), parse_error::create(114, element_type_parse_position, "Unsupported BSON record type 0x" + std::string(cr))); - } - } - } - - /*! - @brief Read a BSON element list (as specified in the BSON-spec) - - The same binary layout is used for objects and arrays, hence it must be - indicated with the argument @a is_array which one is expected - (true --> array, false --> object). 
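    As an illustration (the bytes below are spelled out here and are not part
    of the patch), the embedded array [1] and the embedded document {"0": 1}
    share exactly this layout; only the element type seen in the parent
    (0x04 array vs 0x03 object), and hence the @a is_array flag passed down,
    decides whether the key "0" is forwarded via sax->key():

        const std::array<std::uint8_t, 12> doc = {
            0x0C, 0x00, 0x00, 0x00,   // int32: total document size (12)
            0x10, 0x30, 0x00,         // int32 element with e_name "0"
            0x01, 0x00, 0x00, 0x00,   // element value 1
            0x00                      // document terminator
        };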
- - @param[in] is_array Determines if the element list being read is to be - treated as an object (@a is_array == false), or as an - array (@a is_array == true). - @return whether a valid BSON-object/array was passed to the SAX parser - */ - bool parse_bson_element_list(const bool is_array) - { - string_t key; - while (int element_type = get()) - { - if (JSON_UNLIKELY(not unexpect_eof(input_format_t::bson, "element list"))) - { - return false; - } - - const std::size_t element_type_parse_position = chars_read; - if (JSON_UNLIKELY(not get_bson_cstr(key))) - { - return false; - } - - if (not is_array) - { - if (not sax->key(key)) - { - return false; - } - } - - if (JSON_UNLIKELY(not parse_bson_element_internal(element_type, element_type_parse_position))) - { - return false; - } - - // get_bson_cstr only appends - key.clear(); - } - - return true; - } - - /*! - @brief Reads an array from the BSON input and passes it to the SAX-parser. - @return whether a valid BSON-array was passed to the SAX parser - */ - bool parse_bson_array() - { - std::int32_t document_size; - get_number(input_format_t::bson, document_size); - - if (JSON_UNLIKELY(not sax->start_array(std::size_t(-1)))) - { - return false; - } - - if (JSON_UNLIKELY(not parse_bson_element_list(/*is_array*/true))) - { - return false; - } - - return sax->end_array(); - } - - ////////// - // CBOR // - ////////// - - /*! - @param[in] get_char whether a new character should be retrieved from the - input (true, default) or whether the last read - character should be considered instead - - @return whether a valid CBOR value was passed to the SAX parser - */ - bool parse_cbor_internal(const bool get_char = true) - { - switch (get_char ? get() : current) - { - // EOF - case std::char_traits::eof(): - return unexpect_eof(input_format_t::cbor, "value"); - - // Integer 0x00..0x17 (0..23) - case 0x00: - case 0x01: - case 0x02: - case 0x03: - case 0x04: - case 0x05: - case 0x06: - case 0x07: - case 0x08: - case 0x09: - case 0x0A: - case 0x0B: - case 0x0C: - case 0x0D: - case 0x0E: - case 0x0F: - case 0x10: - case 0x11: - case 0x12: - case 0x13: - case 0x14: - case 0x15: - case 0x16: - case 0x17: - return sax->number_unsigned(static_cast(current)); - - case 0x18: // Unsigned integer (one-byte uint8_t follows) - { - uint8_t number; - return get_number(input_format_t::cbor, number) and sax->number_unsigned(number); - } - - case 0x19: // Unsigned integer (two-byte uint16_t follows) - { - uint16_t number; - return get_number(input_format_t::cbor, number) and sax->number_unsigned(number); - } - - case 0x1A: // Unsigned integer (four-byte uint32_t follows) - { - uint32_t number; - return get_number(input_format_t::cbor, number) and sax->number_unsigned(number); - } - - case 0x1B: // Unsigned integer (eight-byte uint64_t follows) - { - uint64_t number; - return get_number(input_format_t::cbor, number) and sax->number_unsigned(number); - } - - // Negative integer -1-0x00..-1-0x17 (-1..-24) - case 0x20: - case 0x21: - case 0x22: - case 0x23: - case 0x24: - case 0x25: - case 0x26: - case 0x27: - case 0x28: - case 0x29: - case 0x2A: - case 0x2B: - case 0x2C: - case 0x2D: - case 0x2E: - case 0x2F: - case 0x30: - case 0x31: - case 0x32: - case 0x33: - case 0x34: - case 0x35: - case 0x36: - case 0x37: - return sax->number_integer(static_cast(0x20 - 1 - current)); - - case 0x38: // Negative integer (one-byte uint8_t follows) - { - uint8_t number; - return get_number(input_format_t::cbor, number) and sax->number_integer(static_cast(-1) - number); - } - - case 0x39: // 
Negative integer -1-n (two-byte uint16_t follows) - { - uint16_t number; - return get_number(input_format_t::cbor, number) and sax->number_integer(static_cast(-1) - number); - } - - case 0x3A: // Negative integer -1-n (four-byte uint32_t follows) - { - uint32_t number; - return get_number(input_format_t::cbor, number) and sax->number_integer(static_cast(-1) - number); - } - - case 0x3B: // Negative integer -1-n (eight-byte uint64_t follows) - { - uint64_t number; - return get_number(input_format_t::cbor, number) and sax->number_integer(static_cast(-1) - - static_cast(number)); - } - - // UTF-8 string (0x00..0x17 bytes follow) - case 0x60: - case 0x61: - case 0x62: - case 0x63: - case 0x64: - case 0x65: - case 0x66: - case 0x67: - case 0x68: - case 0x69: - case 0x6A: - case 0x6B: - case 0x6C: - case 0x6D: - case 0x6E: - case 0x6F: - case 0x70: - case 0x71: - case 0x72: - case 0x73: - case 0x74: - case 0x75: - case 0x76: - case 0x77: - case 0x78: // UTF-8 string (one-byte uint8_t for n follows) - case 0x79: // UTF-8 string (two-byte uint16_t for n follow) - case 0x7A: // UTF-8 string (four-byte uint32_t for n follow) - case 0x7B: // UTF-8 string (eight-byte uint64_t for n follow) - case 0x7F: // UTF-8 string (indefinite length) - { - string_t s; - return get_cbor_string(s) and sax->string(s); - } - - // array (0x00..0x17 data items follow) - case 0x80: - case 0x81: - case 0x82: - case 0x83: - case 0x84: - case 0x85: - case 0x86: - case 0x87: - case 0x88: - case 0x89: - case 0x8A: - case 0x8B: - case 0x8C: - case 0x8D: - case 0x8E: - case 0x8F: - case 0x90: - case 0x91: - case 0x92: - case 0x93: - case 0x94: - case 0x95: - case 0x96: - case 0x97: - return get_cbor_array(static_cast(current & 0x1F)); - - case 0x98: // array (one-byte uint8_t for n follows) - { - uint8_t len; - return get_number(input_format_t::cbor, len) and get_cbor_array(static_cast(len)); - } - - case 0x99: // array (two-byte uint16_t for n follow) - { - uint16_t len; - return get_number(input_format_t::cbor, len) and get_cbor_array(static_cast(len)); - } - - case 0x9A: // array (four-byte uint32_t for n follow) - { - uint32_t len; - return get_number(input_format_t::cbor, len) and get_cbor_array(static_cast(len)); - } - - case 0x9B: // array (eight-byte uint64_t for n follow) - { - uint64_t len; - return get_number(input_format_t::cbor, len) and get_cbor_array(static_cast(len)); - } - - case 0x9F: // array (indefinite length) - return get_cbor_array(std::size_t(-1)); - - // map (0x00..0x17 pairs of data items follow) - case 0xA0: - case 0xA1: - case 0xA2: - case 0xA3: - case 0xA4: - case 0xA5: - case 0xA6: - case 0xA7: - case 0xA8: - case 0xA9: - case 0xAA: - case 0xAB: - case 0xAC: - case 0xAD: - case 0xAE: - case 0xAF: - case 0xB0: - case 0xB1: - case 0xB2: - case 0xB3: - case 0xB4: - case 0xB5: - case 0xB6: - case 0xB7: - return get_cbor_object(static_cast(current & 0x1F)); - - case 0xB8: // map (one-byte uint8_t for n follows) - { - uint8_t len; - return get_number(input_format_t::cbor, len) and get_cbor_object(static_cast(len)); - } - - case 0xB9: // map (two-byte uint16_t for n follow) - { - uint16_t len; - return get_number(input_format_t::cbor, len) and get_cbor_object(static_cast(len)); - } - - case 0xBA: // map (four-byte uint32_t for n follow) - { - uint32_t len; - return get_number(input_format_t::cbor, len) and get_cbor_object(static_cast(len)); - } - - case 0xBB: // map (eight-byte uint64_t for n follow) - { - uint64_t len; - return get_number(input_format_t::cbor, len) and get_cbor_object(static_cast(len)); 
- } - - case 0xBF: // map (indefinite length) - return get_cbor_object(std::size_t(-1)); - - case 0xF4: // false - return sax->boolean(false); - - case 0xF5: // true - return sax->boolean(true); - - case 0xF6: // null - return sax->null(); - - case 0xF9: // Half-Precision Float (two-byte IEEE 754) - { - const int byte1_raw = get(); - if (JSON_UNLIKELY(not unexpect_eof(input_format_t::cbor, "number"))) - { - return false; - } - const int byte2_raw = get(); - if (JSON_UNLIKELY(not unexpect_eof(input_format_t::cbor, "number"))) - { - return false; - } - - const auto byte1 = static_cast(byte1_raw); - const auto byte2 = static_cast(byte2_raw); - - // code from RFC 7049, Appendix D, Figure 3: - // As half-precision floating-point numbers were only added - // to IEEE 754 in 2008, today's programming platforms often - // still only have limited support for them. It is very - // easy to include at least decoding support for them even - // without such support. An example of a small decoder for - // half-precision floating-point numbers in the C language - // is shown in Fig. 3. - const int half = (byte1 << 8) + byte2; - const double val = [&half] - { - const int exp = (half >> 10) & 0x1F; - const int mant = half & 0x3FF; - assert(0 <= exp and exp <= 32); - assert(0 <= mant and mant <= 1024); - switch (exp) - { - case 0: - return std::ldexp(mant, -24); - case 31: - return (mant == 0) - ? std::numeric_limits::infinity() - : std::numeric_limits::quiet_NaN(); - default: - return std::ldexp(mant + 1024, exp - 25); - } - }(); - return sax->number_float((half & 0x8000) != 0 - ? static_cast(-val) - : static_cast(val), ""); - } - - case 0xFA: // Single-Precision Float (four-byte IEEE 754) - { - float number; - return get_number(input_format_t::cbor, number) and sax->number_float(static_cast(number), ""); - } - - case 0xFB: // Double-Precision Float (eight-byte IEEE 754) - { - double number; - return get_number(input_format_t::cbor, number) and sax->number_float(static_cast(number), ""); - } - - default: // anything else (0xFF is handled inside the other types) - { - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::cbor, "invalid byte: 0x" + last_token, "value"))); - } - } - } - - /*! - @brief reads a CBOR string - - This function first reads starting bytes to determine the expected - string length and then copies this number of bytes into a string. - Additionally, CBOR's strings with indefinite lengths are supported. 
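    For example (bytes spelled out here, assuming the public json::from_cbor
    helper; not taken from the patch), both of the following inputs decode to
    the JSON string "abc":

        std::vector<std::uint8_t> definite   = {0x63, 'a', 'b', 'c'};  // text(3) "abc"
        std::vector<std::uint8_t> indefinite = {0x7F,                  // text(*), chunked
                                                0x62, 'a', 'b',        //   chunk "ab"
                                                0x61, 'c',             //   chunk "c"
                                                0xFF};                 //   "break"
        // both decode to the JSON string "abc" via json::from_cbor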
- - @param[out] result created string - - @return whether string creation completed - */ - bool get_cbor_string(string_t& result) - { - if (JSON_UNLIKELY(not unexpect_eof(input_format_t::cbor, "string"))) - { - return false; - } - - switch (current) - { - // UTF-8 string (0x00..0x17 bytes follow) - case 0x60: - case 0x61: - case 0x62: - case 0x63: - case 0x64: - case 0x65: - case 0x66: - case 0x67: - case 0x68: - case 0x69: - case 0x6A: - case 0x6B: - case 0x6C: - case 0x6D: - case 0x6E: - case 0x6F: - case 0x70: - case 0x71: - case 0x72: - case 0x73: - case 0x74: - case 0x75: - case 0x76: - case 0x77: - { - return get_string(input_format_t::cbor, current & 0x1F, result); - } - - case 0x78: // UTF-8 string (one-byte uint8_t for n follows) - { - uint8_t len; - return get_number(input_format_t::cbor, len) and get_string(input_format_t::cbor, len, result); - } - - case 0x79: // UTF-8 string (two-byte uint16_t for n follow) - { - uint16_t len; - return get_number(input_format_t::cbor, len) and get_string(input_format_t::cbor, len, result); - } - - case 0x7A: // UTF-8 string (four-byte uint32_t for n follow) - { - uint32_t len; - return get_number(input_format_t::cbor, len) and get_string(input_format_t::cbor, len, result); - } - - case 0x7B: // UTF-8 string (eight-byte uint64_t for n follow) - { - uint64_t len; - return get_number(input_format_t::cbor, len) and get_string(input_format_t::cbor, len, result); - } - - case 0x7F: // UTF-8 string (indefinite length) - { - while (get() != 0xFF) - { - string_t chunk; - if (not get_cbor_string(chunk)) - { - return false; - } - result.append(chunk); - } - return true; - } - - default: - { - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::cbor, "expected length specification (0x60-0x7B) or indefinite string type (0x7F); last byte: 0x" + last_token, "string"))); - } - } - } - - /*! - @param[in] len the length of the array or std::size_t(-1) for an - array of indefinite size - @return whether array creation completed - */ - bool get_cbor_array(const std::size_t len) - { - if (JSON_UNLIKELY(not sax->start_array(len))) - { - return false; - } - - if (len != std::size_t(-1)) - { - for (std::size_t i = 0; i < len; ++i) - { - if (JSON_UNLIKELY(not parse_cbor_internal())) - { - return false; - } - } - } - else - { - while (get() != 0xFF) - { - if (JSON_UNLIKELY(not parse_cbor_internal(false))) - { - return false; - } - } - } - - return sax->end_array(); - } - - /*! - @param[in] len the length of the object or std::size_t(-1) for an - object of indefinite size - @return whether object creation completed - */ - bool get_cbor_object(const std::size_t len) - { - if (not JSON_UNLIKELY(sax->start_object(len))) - { - return false; - } - - string_t key; - if (len != std::size_t(-1)) - { - for (std::size_t i = 0; i < len; ++i) - { - get(); - if (JSON_UNLIKELY(not get_cbor_string(key) or not sax->key(key))) - { - return false; - } - - if (JSON_UNLIKELY(not parse_cbor_internal())) - { - return false; - } - key.clear(); - } - } - else - { - while (get() != 0xFF) - { - if (JSON_UNLIKELY(not get_cbor_string(key) or not sax->key(key))) - { - return false; - } - - if (JSON_UNLIKELY(not parse_cbor_internal())) - { - return false; - } - key.clear(); - } - } - - return sax->end_object(); - } - - ///////////// - // MsgPack // - ///////////// - - /*! 
- @return whether a valid MessagePack value was passed to the SAX parser - */ - bool parse_msgpack_internal() - { - switch (get()) - { - // EOF - case std::char_traits::eof(): - return unexpect_eof(input_format_t::msgpack, "value"); - - // positive fixint - case 0x00: - case 0x01: - case 0x02: - case 0x03: - case 0x04: - case 0x05: - case 0x06: - case 0x07: - case 0x08: - case 0x09: - case 0x0A: - case 0x0B: - case 0x0C: - case 0x0D: - case 0x0E: - case 0x0F: - case 0x10: - case 0x11: - case 0x12: - case 0x13: - case 0x14: - case 0x15: - case 0x16: - case 0x17: - case 0x18: - case 0x19: - case 0x1A: - case 0x1B: - case 0x1C: - case 0x1D: - case 0x1E: - case 0x1F: - case 0x20: - case 0x21: - case 0x22: - case 0x23: - case 0x24: - case 0x25: - case 0x26: - case 0x27: - case 0x28: - case 0x29: - case 0x2A: - case 0x2B: - case 0x2C: - case 0x2D: - case 0x2E: - case 0x2F: - case 0x30: - case 0x31: - case 0x32: - case 0x33: - case 0x34: - case 0x35: - case 0x36: - case 0x37: - case 0x38: - case 0x39: - case 0x3A: - case 0x3B: - case 0x3C: - case 0x3D: - case 0x3E: - case 0x3F: - case 0x40: - case 0x41: - case 0x42: - case 0x43: - case 0x44: - case 0x45: - case 0x46: - case 0x47: - case 0x48: - case 0x49: - case 0x4A: - case 0x4B: - case 0x4C: - case 0x4D: - case 0x4E: - case 0x4F: - case 0x50: - case 0x51: - case 0x52: - case 0x53: - case 0x54: - case 0x55: - case 0x56: - case 0x57: - case 0x58: - case 0x59: - case 0x5A: - case 0x5B: - case 0x5C: - case 0x5D: - case 0x5E: - case 0x5F: - case 0x60: - case 0x61: - case 0x62: - case 0x63: - case 0x64: - case 0x65: - case 0x66: - case 0x67: - case 0x68: - case 0x69: - case 0x6A: - case 0x6B: - case 0x6C: - case 0x6D: - case 0x6E: - case 0x6F: - case 0x70: - case 0x71: - case 0x72: - case 0x73: - case 0x74: - case 0x75: - case 0x76: - case 0x77: - case 0x78: - case 0x79: - case 0x7A: - case 0x7B: - case 0x7C: - case 0x7D: - case 0x7E: - case 0x7F: - return sax->number_unsigned(static_cast(current)); - - // fixmap - case 0x80: - case 0x81: - case 0x82: - case 0x83: - case 0x84: - case 0x85: - case 0x86: - case 0x87: - case 0x88: - case 0x89: - case 0x8A: - case 0x8B: - case 0x8C: - case 0x8D: - case 0x8E: - case 0x8F: - return get_msgpack_object(static_cast(current & 0x0F)); - - // fixarray - case 0x90: - case 0x91: - case 0x92: - case 0x93: - case 0x94: - case 0x95: - case 0x96: - case 0x97: - case 0x98: - case 0x99: - case 0x9A: - case 0x9B: - case 0x9C: - case 0x9D: - case 0x9E: - case 0x9F: - return get_msgpack_array(static_cast(current & 0x0F)); - - // fixstr - case 0xA0: - case 0xA1: - case 0xA2: - case 0xA3: - case 0xA4: - case 0xA5: - case 0xA6: - case 0xA7: - case 0xA8: - case 0xA9: - case 0xAA: - case 0xAB: - case 0xAC: - case 0xAD: - case 0xAE: - case 0xAF: - case 0xB0: - case 0xB1: - case 0xB2: - case 0xB3: - case 0xB4: - case 0xB5: - case 0xB6: - case 0xB7: - case 0xB8: - case 0xB9: - case 0xBA: - case 0xBB: - case 0xBC: - case 0xBD: - case 0xBE: - case 0xBF: - { - string_t s; - return get_msgpack_string(s) and sax->string(s); - } - - case 0xC0: // nil - return sax->null(); - - case 0xC2: // false - return sax->boolean(false); - - case 0xC3: // true - return sax->boolean(true); - - case 0xCA: // float 32 - { - float number; - return get_number(input_format_t::msgpack, number) and sax->number_float(static_cast(number), ""); - } - - case 0xCB: // float 64 - { - double number; - return get_number(input_format_t::msgpack, number) and sax->number_float(static_cast(number), ""); - } - - case 0xCC: // uint 8 - { - uint8_t number; - return 
get_number(input_format_t::msgpack, number) and sax->number_unsigned(number); - } - - case 0xCD: // uint 16 - { - uint16_t number; - return get_number(input_format_t::msgpack, number) and sax->number_unsigned(number); - } - - case 0xCE: // uint 32 - { - uint32_t number; - return get_number(input_format_t::msgpack, number) and sax->number_unsigned(number); - } - - case 0xCF: // uint 64 - { - uint64_t number; - return get_number(input_format_t::msgpack, number) and sax->number_unsigned(number); - } - - case 0xD0: // int 8 - { - int8_t number; - return get_number(input_format_t::msgpack, number) and sax->number_integer(number); - } - - case 0xD1: // int 16 - { - int16_t number; - return get_number(input_format_t::msgpack, number) and sax->number_integer(number); - } - - case 0xD2: // int 32 - { - int32_t number; - return get_number(input_format_t::msgpack, number) and sax->number_integer(number); - } - - case 0xD3: // int 64 - { - int64_t number; - return get_number(input_format_t::msgpack, number) and sax->number_integer(number); - } - - case 0xD9: // str 8 - case 0xDA: // str 16 - case 0xDB: // str 32 - { - string_t s; - return get_msgpack_string(s) and sax->string(s); - } - - case 0xDC: // array 16 - { - uint16_t len; - return get_number(input_format_t::msgpack, len) and get_msgpack_array(static_cast(len)); - } - - case 0xDD: // array 32 - { - uint32_t len; - return get_number(input_format_t::msgpack, len) and get_msgpack_array(static_cast(len)); - } - - case 0xDE: // map 16 - { - uint16_t len; - return get_number(input_format_t::msgpack, len) and get_msgpack_object(static_cast(len)); - } - - case 0xDF: // map 32 - { - uint32_t len; - return get_number(input_format_t::msgpack, len) and get_msgpack_object(static_cast(len)); - } - - // negative fixint - case 0xE0: - case 0xE1: - case 0xE2: - case 0xE3: - case 0xE4: - case 0xE5: - case 0xE6: - case 0xE7: - case 0xE8: - case 0xE9: - case 0xEA: - case 0xEB: - case 0xEC: - case 0xED: - case 0xEE: - case 0xEF: - case 0xF0: - case 0xF1: - case 0xF2: - case 0xF3: - case 0xF4: - case 0xF5: - case 0xF6: - case 0xF7: - case 0xF8: - case 0xF9: - case 0xFA: - case 0xFB: - case 0xFC: - case 0xFD: - case 0xFE: - case 0xFF: - return sax->number_integer(static_cast(current)); - - default: // anything else - { - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::msgpack, "invalid byte: 0x" + last_token, "value"))); - } - } - } - - /*! - @brief reads a MessagePack string - - This function first reads starting bytes to determine the expected - string length and then copies this number of bytes into a string. 
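    For example (bytes spelled out here, assuming the public json::from_msgpack
    helper; not taken from the patch), the string "foo" may arrive either as a
    fixstr or with an explicit length byte; both forms end up in this function:

        std::vector<std::uint8_t> fixstr = {0xA3, 'f', 'o', 'o'};        // fixstr, length 3
        std::vector<std::uint8_t> str8   = {0xD9, 0x03, 'f', 'o', 'o'};  // str 8, length 3
        // both decode to the JSON string "foo" via json::from_msgpack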
- - @param[out] result created string - - @return whether string creation completed - */ - bool get_msgpack_string(string_t& result) - { - if (JSON_UNLIKELY(not unexpect_eof(input_format_t::msgpack, "string"))) - { - return false; - } - - switch (current) - { - // fixstr - case 0xA0: - case 0xA1: - case 0xA2: - case 0xA3: - case 0xA4: - case 0xA5: - case 0xA6: - case 0xA7: - case 0xA8: - case 0xA9: - case 0xAA: - case 0xAB: - case 0xAC: - case 0xAD: - case 0xAE: - case 0xAF: - case 0xB0: - case 0xB1: - case 0xB2: - case 0xB3: - case 0xB4: - case 0xB5: - case 0xB6: - case 0xB7: - case 0xB8: - case 0xB9: - case 0xBA: - case 0xBB: - case 0xBC: - case 0xBD: - case 0xBE: - case 0xBF: - { - return get_string(input_format_t::msgpack, current & 0x1F, result); - } - - case 0xD9: // str 8 - { - uint8_t len; - return get_number(input_format_t::msgpack, len) and get_string(input_format_t::msgpack, len, result); - } - - case 0xDA: // str 16 - { - uint16_t len; - return get_number(input_format_t::msgpack, len) and get_string(input_format_t::msgpack, len, result); - } - - case 0xDB: // str 32 - { - uint32_t len; - return get_number(input_format_t::msgpack, len) and get_string(input_format_t::msgpack, len, result); - } - - default: - { - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::msgpack, "expected length specification (0xA0-0xBF, 0xD9-0xDB); last byte: 0x" + last_token, "string"))); - } - } - } - - /*! - @param[in] len the length of the array - @return whether array creation completed - */ - bool get_msgpack_array(const std::size_t len) - { - if (JSON_UNLIKELY(not sax->start_array(len))) - { - return false; - } - - for (std::size_t i = 0; i < len; ++i) - { - if (JSON_UNLIKELY(not parse_msgpack_internal())) - { - return false; - } - } - - return sax->end_array(); - } - - /*! - @param[in] len the length of the object - @return whether object creation completed - */ - bool get_msgpack_object(const std::size_t len) - { - if (JSON_UNLIKELY(not sax->start_object(len))) - { - return false; - } - - string_t key; - for (std::size_t i = 0; i < len; ++i) - { - get(); - if (JSON_UNLIKELY(not get_msgpack_string(key) or not sax->key(key))) - { - return false; - } - - if (JSON_UNLIKELY(not parse_msgpack_internal())) - { - return false; - } - key.clear(); - } - - return sax->end_object(); - } - - //////////// - // UBJSON // - //////////// - - /*! - @param[in] get_char whether a new character should be retrieved from the - input (true, default) or whether the last read - character should be considered instead - - @return whether a valid UBJSON value was passed to the SAX parser - */ - bool parse_ubjson_internal(const bool get_char = true) - { - return get_ubjson_value(get_char ? get_ignore_noop() : current); - } - - /*! - @brief reads a UBJSON string - - This function is either called after reading the 'S' byte explicitly - indicating a string, or in case of an object key where the 'S' byte can be - left out. - - @param[out] result created string - @param[in] get_char whether a new character should be retrieved from the - input (true, default) or whether the last read - character should be considered instead - - @return whether string creation completed - */ - bool get_ubjson_string(string_t& result, const bool get_char = true) - { - if (get_char) - { - get(); // TODO: may we ignore N here? 
- } - - if (JSON_UNLIKELY(not unexpect_eof(input_format_t::ubjson, "value"))) - { - return false; - } - - switch (current) - { - case 'U': - { - uint8_t len; - return get_number(input_format_t::ubjson, len) and get_string(input_format_t::ubjson, len, result); - } - - case 'i': - { - int8_t len; - return get_number(input_format_t::ubjson, len) and get_string(input_format_t::ubjson, len, result); - } - - case 'I': - { - int16_t len; - return get_number(input_format_t::ubjson, len) and get_string(input_format_t::ubjson, len, result); - } - - case 'l': - { - int32_t len; - return get_number(input_format_t::ubjson, len) and get_string(input_format_t::ubjson, len, result); - } - - case 'L': - { - int64_t len; - return get_number(input_format_t::ubjson, len) and get_string(input_format_t::ubjson, len, result); - } - - default: - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "expected length type specification (U, i, I, l, L); last byte: 0x" + last_token, "string"))); - } - } - - /*! - @param[out] result determined size - @return whether size determination completed - */ - bool get_ubjson_size_value(std::size_t& result) - { - switch (get_ignore_noop()) - { - case 'U': - { - uint8_t number; - if (JSON_UNLIKELY(not get_number(input_format_t::ubjson, number))) - { - return false; - } - result = static_cast(number); - return true; - } - - case 'i': - { - int8_t number; - if (JSON_UNLIKELY(not get_number(input_format_t::ubjson, number))) - { - return false; - } - result = static_cast(number); - return true; - } - - case 'I': - { - int16_t number; - if (JSON_UNLIKELY(not get_number(input_format_t::ubjson, number))) - { - return false; - } - result = static_cast(number); - return true; - } - - case 'l': - { - int32_t number; - if (JSON_UNLIKELY(not get_number(input_format_t::ubjson, number))) - { - return false; - } - result = static_cast(number); - return true; - } - - case 'L': - { - int64_t number; - if (JSON_UNLIKELY(not get_number(input_format_t::ubjson, number))) - { - return false; - } - result = static_cast(number); - return true; - } - - default: - { - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "expected length type specification (U, i, I, l, L) after '#'; last byte: 0x" + last_token, "size"))); - } - } - } - - /*! - @brief determine the type and size for a container - - In the optimized UBJSON format, a type and a size can be provided to allow - for a more compact representation. 
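    For example (bytes spelled out here, assuming the public json::from_ubjson
    helper; not taken from the patch), the array [1, 2, 3] can be written
    plainly or with the optimized '$' (type) and '#' (count) markers that this
    function detects; the optimized form drops the per-element type markers
    and the closing ']':

        std::vector<std::uint8_t> plain     = {'[', 'i', 1, 'i', 2, 'i', 3, ']'};
        std::vector<std::uint8_t> optimized = {'[', '$', 'i', '#', 'i', 3, 1, 2, 3};
        // both decode to the JSON array [1, 2, 3] via json::from_ubjson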
- - @param[out] result pair of the size and the type - - @return whether pair creation completed - */ - bool get_ubjson_size_type(std::pair& result) - { - result.first = string_t::npos; // size - result.second = 0; // type - - get_ignore_noop(); - - if (current == '$') - { - result.second = get(); // must not ignore 'N', because 'N' maybe the type - if (JSON_UNLIKELY(not unexpect_eof(input_format_t::ubjson, "type"))) - { - return false; - } - - get_ignore_noop(); - if (JSON_UNLIKELY(current != '#')) - { - if (JSON_UNLIKELY(not unexpect_eof(input_format_t::ubjson, "value"))) - { - return false; - } - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::ubjson, "expected '#' after type information; last byte: 0x" + last_token, "size"))); - } - - return get_ubjson_size_value(result.first); - } - else if (current == '#') - { - return get_ubjson_size_value(result.first); - } - return true; - } - - /*! - @param prefix the previously read or set type prefix - @return whether value creation completed - */ - bool get_ubjson_value(const int prefix) - { - switch (prefix) - { - case std::char_traits::eof(): // EOF - return unexpect_eof(input_format_t::ubjson, "value"); - - case 'T': // true - return sax->boolean(true); - case 'F': // false - return sax->boolean(false); - - case 'Z': // null - return sax->null(); - - case 'U': - { - uint8_t number; - return get_number(input_format_t::ubjson, number) and sax->number_unsigned(number); - } - - case 'i': - { - int8_t number; - return get_number(input_format_t::ubjson, number) and sax->number_integer(number); - } - - case 'I': - { - int16_t number; - return get_number(input_format_t::ubjson, number) and sax->number_integer(number); - } - - case 'l': - { - int32_t number; - return get_number(input_format_t::ubjson, number) and sax->number_integer(number); - } - - case 'L': - { - int64_t number; - return get_number(input_format_t::ubjson, number) and sax->number_integer(number); - } - - case 'd': - { - float number; - return get_number(input_format_t::ubjson, number) and sax->number_float(static_cast(number), ""); - } - - case 'D': - { - double number; - return get_number(input_format_t::ubjson, number) and sax->number_float(static_cast(number), ""); - } - - case 'C': // char - { - get(); - if (JSON_UNLIKELY(not unexpect_eof(input_format_t::ubjson, "char"))) - { - return false; - } - if (JSON_UNLIKELY(current > 127)) - { - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "byte after 'C' must be in range 0x00..0x7F; last byte: 0x" + last_token, "char"))); - } - string_t s(1, static_cast(current)); - return sax->string(s); - } - - case 'S': // string - { - string_t s; - return get_ubjson_string(s) and sax->string(s); - } - - case '[': // array - return get_ubjson_array(); - - case '{': // object - return get_ubjson_object(); - - default: // anything else - { - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::ubjson, "invalid byte: 0x" + last_token, "value"))); - } - } - } - - /*! 
- @return whether array creation completed - */ - bool get_ubjson_array() - { - std::pair size_and_type; - if (JSON_UNLIKELY(not get_ubjson_size_type(size_and_type))) - { - return false; - } - - if (size_and_type.first != string_t::npos) - { - if (JSON_UNLIKELY(not sax->start_array(size_and_type.first))) - { - return false; - } - - if (size_and_type.second != 0) - { - if (size_and_type.second != 'N') - { - for (std::size_t i = 0; i < size_and_type.first; ++i) - { - if (JSON_UNLIKELY(not get_ubjson_value(size_and_type.second))) - { - return false; - } - } - } - } - else - { - for (std::size_t i = 0; i < size_and_type.first; ++i) - { - if (JSON_UNLIKELY(not parse_ubjson_internal())) - { - return false; - } - } - } - } - else - { - if (JSON_UNLIKELY(not sax->start_array(std::size_t(-1)))) - { - return false; - } - - while (current != ']') - { - if (JSON_UNLIKELY(not parse_ubjson_internal(false))) - { - return false; - } - get_ignore_noop(); - } - } - - return sax->end_array(); - } - - /*! - @return whether object creation completed - */ - bool get_ubjson_object() - { - std::pair size_and_type; - if (JSON_UNLIKELY(not get_ubjson_size_type(size_and_type))) - { - return false; - } - - string_t key; - if (size_and_type.first != string_t::npos) - { - if (JSON_UNLIKELY(not sax->start_object(size_and_type.first))) - { - return false; - } - - if (size_and_type.second != 0) - { - for (std::size_t i = 0; i < size_and_type.first; ++i) - { - if (JSON_UNLIKELY(not get_ubjson_string(key) or not sax->key(key))) - { - return false; - } - if (JSON_UNLIKELY(not get_ubjson_value(size_and_type.second))) - { - return false; - } - key.clear(); - } - } - else - { - for (std::size_t i = 0; i < size_and_type.first; ++i) - { - if (JSON_UNLIKELY(not get_ubjson_string(key) or not sax->key(key))) - { - return false; - } - if (JSON_UNLIKELY(not parse_ubjson_internal())) - { - return false; - } - key.clear(); - } - } - } - else - { - if (JSON_UNLIKELY(not sax->start_object(std::size_t(-1)))) - { - return false; - } - - while (current != '}') - { - if (JSON_UNLIKELY(not get_ubjson_string(key, false) or not sax->key(key))) - { - return false; - } - if (JSON_UNLIKELY(not parse_ubjson_internal())) - { - return false; - } - get_ignore_noop(); - key.clear(); - } - } - - return sax->end_object(); - } - - /////////////////////// - // Utility functions // - /////////////////////// - - /*! - @brief get next character from the input - - This function provides the interface to the used input adapter. It does - not throw in case the input reached EOF, but returns a -'ve valued - `std::char_traits::eof()` in that case. - - @return character read from the input - */ - int get() - { - ++chars_read; - return (current = ia->get_character()); - } - - /*! - @return character read from the input after ignoring all 'N' entries - */ - int get_ignore_noop() - { - do - { - get(); - } - while (current == 'N'); - - return current; - } - - /* - @brief read a number from the input - - @tparam NumberType the type of the number - @param[in] format the current format (for diagnostics) - @param[out] result number of type @a NumberType - - @return whether conversion completed - - @note This function needs to respect the system's endianess, because - bytes in CBOR, MessagePack, and UBJSON are stored in network order - (big endian) and therefore need reordering on little endian systems. 
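    Concretely (an illustration, not taken from the patch): the CBOR input
    0x19 0x01 0x02 announces a uint16_t and carries it big endian, so it must
    decode to 0x0102 == 258. On a little-endian host the two payload bytes are
    therefore written into the temporary array in reverse order before the
    final std::memcpy:

        std::array<std::uint8_t, 2> vec = {0x02, 0x01};  // payload 0x01 0x02, reversed
        std::uint16_t result;
        std::memcpy(&result, vec.data(), sizeof(result));
        // result == 258 on a little-endian host (0x0201 == 513 without the reversal)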
- */ - template - bool get_number(const input_format_t format, NumberType& result) - { - // step 1: read input into array with system's byte order - std::array vec; - for (std::size_t i = 0; i < sizeof(NumberType); ++i) - { - get(); - if (JSON_UNLIKELY(not unexpect_eof(format, "number"))) - { - return false; - } - - // reverse byte order prior to conversion if necessary - if (is_little_endian && !InputIsLittleEndian) - { - vec[sizeof(NumberType) - i - 1] = static_cast(current); - } - else - { - vec[i] = static_cast(current); // LCOV_EXCL_LINE - } - } - - // step 2: convert array into number of type T and return - std::memcpy(&result, vec.data(), sizeof(NumberType)); - return true; - } - - /*! - @brief create a string by reading characters from the input - - @tparam NumberType the type of the number - @param[in] format the current format (for diagnostics) - @param[in] len number of characters to read - @param[out] result string created by reading @a len bytes - - @return whether string creation completed - - @note We can not reserve @a len bytes for the result, because @a len - may be too large. Usually, @ref unexpect_eof() detects the end of - the input before we run out of string memory. - */ - template - bool get_string(const input_format_t format, - const NumberType len, - string_t& result) - { - bool success = true; - std::generate_n(std::back_inserter(result), len, [this, &success, &format]() - { - get(); - if (JSON_UNLIKELY(not unexpect_eof(format, "string"))) - { - success = false; - } - return static_cast(current); - }); - return success; - } - - /*! - @param[in] format the current format (for diagnostics) - @param[in] context further context information (for diagnostics) - @return whether the last read character is not EOF - */ - bool unexpect_eof(const input_format_t format, const char* context) const - { - if (JSON_UNLIKELY(current == std::char_traits::eof())) - { - return sax->parse_error(chars_read, "", - parse_error::create(110, chars_read, exception_message(format, "unexpected end of input", context))); - } - return true; - } - - /*! - @return a string representation of the last read byte - */ - std::string get_token_string() const - { - char cr[3]; - (std::snprintf)(cr, 3, "%.2hhX", static_cast(current)); - return std::string{cr}; - } - - /*! 
- @param[in] format the current format - @param[in] detail a detailed error message - @param[in] context further contect information - @return a message string to use in the parse_error exceptions - */ - std::string exception_message(const input_format_t format, - const std::string& detail, - const std::string& context) const - { - std::string error_msg = "syntax error while parsing "; - - switch (format) - { - case input_format_t::cbor: - error_msg += "CBOR"; - break; - - case input_format_t::msgpack: - error_msg += "MessagePack"; - break; - - case input_format_t::ubjson: - error_msg += "UBJSON"; - break; - - case input_format_t::bson: - error_msg += "BSON"; - break; - - // LCOV_EXCL_START - default: - assert(false); - // LCOV_EXCL_STOP - } - - return error_msg + " " + context + ": " + detail; - } - - private: - /// input adapter - input_adapter_t ia = nullptr; - - /// the current character - int current = std::char_traits::eof(); - - /// the number of characters read - std::size_t chars_read = 0; - - /// whether we can assume little endianess - const bool is_little_endian = little_endianess(); - - /// the SAX parser - json_sax_t* sax = nullptr; -}; -} // namespace detail -} // namespace nlohmann - -// #include - - -#include // reverse -#include // array -#include // uint8_t, uint16_t, uint32_t, uint64_t -#include // memcpy -#include // numeric_limits - -// #include - -// #include - - -namespace nlohmann -{ -namespace detail -{ -/////////////////// -// binary writer // -/////////////////// - -/*! -@brief serialization to CBOR and MessagePack values -*/ -template -class binary_writer -{ - using string_t = typename BasicJsonType::string_t; - - public: - /*! - @brief create a binary writer - - @param[in] adapter output adapter to write to - */ - explicit binary_writer(output_adapter_t adapter) : oa(adapter) - { - assert(oa); - } - - /*! - @param[in] j JSON value to serialize - @pre j.type() == value_t::object - */ - void write_bson(const BasicJsonType& j) - { - switch (j.type()) - { - case value_t::object: - { - write_bson_object(*j.m_value.object); - break; - } - - default: - { - JSON_THROW(type_error::create(317, "to serialize to BSON, top-level type must be object, but is " + std::string(j.type_name()))); - } - } - } - - /*! - @param[in] j JSON value to serialize - */ - void write_cbor(const BasicJsonType& j) - { - switch (j.type()) - { - case value_t::null: - { - oa->write_character(to_char_type(0xF6)); - break; - } - - case value_t::boolean: - { - oa->write_character(j.m_value.boolean - ? to_char_type(0xF5) - : to_char_type(0xF4)); - break; - } - - case value_t::number_integer: - { - if (j.m_value.number_integer >= 0) - { - // CBOR does not differentiate between positive signed - // integers and unsigned integers. Therefore, we used the - // code from the value_t::number_unsigned case here. 
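The integer branch that follows encodes a non-negative value with the smallest CBOR width: values up to 0x17 fit into the head byte itself, larger ones are introduced by 0x18/0x19/0x1A/0x1B and carried as a 1/2/4/8-byte big-endian payload. A minimal standalone sketch of that rule, assuming only the CBOR wire format (names are illustrative and not taken from json.hpp):

#include <cstdint>
#include <vector>

// Illustrative sketch: append the CBOR head for an unsigned integer (major type 0)
// followed by its big-endian payload, mirroring the 0x18/0x19/0x1A/0x1B branches.
inline void cbor_write_unsigned(std::vector<std::uint8_t>& out, std::uint64_t n)
{
    auto put_be = [&out](std::uint64_t v, int bytes) {
        for (int i = bytes - 1; i >= 0; --i)
            out.push_back(static_cast<std::uint8_t>(v >> (8 * i)));
    };

    if (n <= 0x17)             out.push_back(static_cast<std::uint8_t>(n)); // value lives in the head byte
    else if (n <= 0xFFu)       { out.push_back(0x18); put_be(n, 1); }
    else if (n <= 0xFFFFu)     { out.push_back(0x19); put_be(n, 2); }
    else if (n <= 0xFFFFFFFFu) { out.push_back(0x1A); put_be(n, 4); }
    else                       { out.push_back(0x1B); put_be(n, 8); }
}

Negative integers use the same width ladder with heads 0x20 + n and 0x38..0x3B, which is why the writer converts them to a positive magnitude first.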
- if (j.m_value.number_integer <= 0x17) - { - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_integer <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x18)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_integer <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x19)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_integer <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x1A)); - write_number(static_cast(j.m_value.number_integer)); - } - else - { - oa->write_character(to_char_type(0x1B)); - write_number(static_cast(j.m_value.number_integer)); - } - } - else - { - // The conversions below encode the sign in the first - // byte, and the value is converted to a positive number. - const auto positive_number = -1 - j.m_value.number_integer; - if (j.m_value.number_integer >= -24) - { - write_number(static_cast(0x20 + positive_number)); - } - else if (positive_number <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x38)); - write_number(static_cast(positive_number)); - } - else if (positive_number <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x39)); - write_number(static_cast(positive_number)); - } - else if (positive_number <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x3A)); - write_number(static_cast(positive_number)); - } - else - { - oa->write_character(to_char_type(0x3B)); - write_number(static_cast(positive_number)); - } - } - break; - } - - case value_t::number_unsigned: - { - if (j.m_value.number_unsigned <= 0x17) - { - write_number(static_cast(j.m_value.number_unsigned)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x18)); - write_number(static_cast(j.m_value.number_unsigned)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x19)); - write_number(static_cast(j.m_value.number_unsigned)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x1A)); - write_number(static_cast(j.m_value.number_unsigned)); - } - else - { - oa->write_character(to_char_type(0x1B)); - write_number(static_cast(j.m_value.number_unsigned)); - } - break; - } - - case value_t::number_float: - { - oa->write_character(get_cbor_float_prefix(j.m_value.number_float)); - write_number(j.m_value.number_float); - break; - } - - case value_t::string: - { - // step 1: write control byte and the string length - const auto N = j.m_value.string->size(); - if (N <= 0x17) - { - write_number(static_cast(0x60 + N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x78)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x79)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x7A)); - write_number(static_cast(N)); - } - // LCOV_EXCL_START - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x7B)); - write_number(static_cast(N)); - } - // LCOV_EXCL_STOP - - // step 2: write the string - oa->write_characters( - reinterpret_cast(j.m_value.string->c_str()), - j.m_value.string->size()); - break; - } - - case value_t::array: - { - // step 1: write control byte and the array 
size - const auto N = j.m_value.array->size(); - if (N <= 0x17) - { - write_number(static_cast(0x80 + N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x98)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x99)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x9A)); - write_number(static_cast(N)); - } - // LCOV_EXCL_START - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x9B)); - write_number(static_cast(N)); - } - // LCOV_EXCL_STOP - - // step 2: write each element - for (const auto& el : *j.m_value.array) - { - write_cbor(el); - } - break; - } - - case value_t::object: - { - // step 1: write control byte and the object size - const auto N = j.m_value.object->size(); - if (N <= 0x17) - { - write_number(static_cast(0xA0 + N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0xB8)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0xB9)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0xBA)); - write_number(static_cast(N)); - } - // LCOV_EXCL_START - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0xBB)); - write_number(static_cast(N)); - } - // LCOV_EXCL_STOP - - // step 2: write each element - for (const auto& el : *j.m_value.object) - { - write_cbor(el.first); - write_cbor(el.second); - } - break; - } - - default: - break; - } - } - - /*! - @param[in] j JSON value to serialize - */ - void write_msgpack(const BasicJsonType& j) - { - switch (j.type()) - { - case value_t::null: // nil - { - oa->write_character(to_char_type(0xC0)); - break; - } - - case value_t::boolean: // true and false - { - oa->write_character(j.m_value.boolean - ? to_char_type(0xC3) - : to_char_type(0xC2)); - break; - } - - case value_t::number_integer: - { - if (j.m_value.number_integer >= 0) - { - // MessagePack does not differentiate between positive - // signed integers and unsigned integers. Therefore, we used - // the code from the value_t::number_unsigned case here. 
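The branch that follows picks the narrowest MessagePack encoding for a non-negative value: positive fixint for values below 128, otherwise a 0xCC/0xCD/0xCE/0xCF marker followed by a 1/2/4/8-byte big-endian payload. A minimal standalone sketch of that marker selection, based only on the MessagePack wire format (names are illustrative and not taken from json.hpp):

#include <cstdint>
#include <vector>

// Illustrative sketch: MessagePack encoding of an unsigned integer, mirroring the
// positive-fixint / uint 8 / uint 16 / uint 32 / uint 64 branches of write_msgpack.
inline void msgpack_write_unsigned(std::vector<std::uint8_t>& out, std::uint64_t n)
{
    auto put_be = [&out](std::uint64_t v, int bytes) {
        for (int i = bytes - 1; i >= 0; --i)
            out.push_back(static_cast<std::uint8_t>(v >> (8 * i)));
    };

    if (n < 0x80)              out.push_back(static_cast<std::uint8_t>(n)); // positive fixint
    else if (n <= 0xFFu)       { out.push_back(0xCC); put_be(n, 1); }       // uint 8
    else if (n <= 0xFFFFu)     { out.push_back(0xCD); put_be(n, 2); }       // uint 16
    else if (n <= 0xFFFFFFFFu) { out.push_back(0xCE); put_be(n, 4); }       // uint 32
    else                       { out.push_back(0xCF); put_be(n, 8); }       // uint 64
}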
- if (j.m_value.number_unsigned < 128) - { - // positive fixnum - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - // uint 8 - oa->write_character(to_char_type(0xCC)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - // uint 16 - oa->write_character(to_char_type(0xCD)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - // uint 32 - oa->write_character(to_char_type(0xCE)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - // uint 64 - oa->write_character(to_char_type(0xCF)); - write_number(static_cast(j.m_value.number_integer)); - } - } - else - { - if (j.m_value.number_integer >= -32) - { - // negative fixnum - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_integer >= (std::numeric_limits::min)() and - j.m_value.number_integer <= (std::numeric_limits::max)()) - { - // int 8 - oa->write_character(to_char_type(0xD0)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_integer >= (std::numeric_limits::min)() and - j.m_value.number_integer <= (std::numeric_limits::max)()) - { - // int 16 - oa->write_character(to_char_type(0xD1)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_integer >= (std::numeric_limits::min)() and - j.m_value.number_integer <= (std::numeric_limits::max)()) - { - // int 32 - oa->write_character(to_char_type(0xD2)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_integer >= (std::numeric_limits::min)() and - j.m_value.number_integer <= (std::numeric_limits::max)()) - { - // int 64 - oa->write_character(to_char_type(0xD3)); - write_number(static_cast(j.m_value.number_integer)); - } - } - break; - } - - case value_t::number_unsigned: - { - if (j.m_value.number_unsigned < 128) - { - // positive fixnum - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - // uint 8 - oa->write_character(to_char_type(0xCC)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - // uint 16 - oa->write_character(to_char_type(0xCD)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - // uint 32 - oa->write_character(to_char_type(0xCE)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - // uint 64 - oa->write_character(to_char_type(0xCF)); - write_number(static_cast(j.m_value.number_integer)); - } - break; - } - - case value_t::number_float: - { - oa->write_character(get_msgpack_float_prefix(j.m_value.number_float)); - write_number(j.m_value.number_float); - break; - } - - case value_t::string: - { - // step 1: write control byte and the string length - const auto N = j.m_value.string->size(); - if (N <= 31) - { - // fixstr - write_number(static_cast(0xA0 | N)); - } - else if (N <= (std::numeric_limits::max)()) - { - // str 8 - oa->write_character(to_char_type(0xD9)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - // str 16 - 
oa->write_character(to_char_type(0xDA)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - // str 32 - oa->write_character(to_char_type(0xDB)); - write_number(static_cast(N)); - } - - // step 2: write the string - oa->write_characters( - reinterpret_cast(j.m_value.string->c_str()), - j.m_value.string->size()); - break; - } - - case value_t::array: - { - // step 1: write control byte and the array size - const auto N = j.m_value.array->size(); - if (N <= 15) - { - // fixarray - write_number(static_cast(0x90 | N)); - } - else if (N <= (std::numeric_limits::max)()) - { - // array 16 - oa->write_character(to_char_type(0xDC)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - // array 32 - oa->write_character(to_char_type(0xDD)); - write_number(static_cast(N)); - } - - // step 2: write each element - for (const auto& el : *j.m_value.array) - { - write_msgpack(el); - } - break; - } - - case value_t::object: - { - // step 1: write control byte and the object size - const auto N = j.m_value.object->size(); - if (N <= 15) - { - // fixmap - write_number(static_cast(0x80 | (N & 0xF))); - } - else if (N <= (std::numeric_limits::max)()) - { - // map 16 - oa->write_character(to_char_type(0xDE)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - // map 32 - oa->write_character(to_char_type(0xDF)); - write_number(static_cast(N)); - } - - // step 2: write each element - for (const auto& el : *j.m_value.object) - { - write_msgpack(el.first); - write_msgpack(el.second); - } - break; - } - - default: - break; - } - } - - /*! - @param[in] j JSON value to serialize - @param[in] use_count whether to use '#' prefixes (optimized format) - @param[in] use_type whether to use '$' prefixes (optimized format) - @param[in] add_prefix whether prefixes need to be used for this value - */ - void write_ubjson(const BasicJsonType& j, const bool use_count, - const bool use_type, const bool add_prefix = true) - { - switch (j.type()) - { - case value_t::null: - { - if (add_prefix) - { - oa->write_character(to_char_type('Z')); - } - break; - } - - case value_t::boolean: - { - if (add_prefix) - { - oa->write_character(j.m_value.boolean - ? 
to_char_type('T') - : to_char_type('F')); - } - break; - } - - case value_t::number_integer: - { - write_number_with_ubjson_prefix(j.m_value.number_integer, add_prefix); - break; - } - - case value_t::number_unsigned: - { - write_number_with_ubjson_prefix(j.m_value.number_unsigned, add_prefix); - break; - } - - case value_t::number_float: - { - write_number_with_ubjson_prefix(j.m_value.number_float, add_prefix); - break; - } - - case value_t::string: - { - if (add_prefix) - { - oa->write_character(to_char_type('S')); - } - write_number_with_ubjson_prefix(j.m_value.string->size(), true); - oa->write_characters( - reinterpret_cast(j.m_value.string->c_str()), - j.m_value.string->size()); - break; - } - - case value_t::array: - { - if (add_prefix) - { - oa->write_character(to_char_type('[')); - } - - bool prefix_required = true; - if (use_type and not j.m_value.array->empty()) - { - assert(use_count); - const CharType first_prefix = ubjson_prefix(j.front()); - const bool same_prefix = std::all_of(j.begin() + 1, j.end(), - [this, first_prefix](const BasicJsonType & v) - { - return ubjson_prefix(v) == first_prefix; - }); - - if (same_prefix) - { - prefix_required = false; - oa->write_character(to_char_type('$')); - oa->write_character(first_prefix); - } - } - - if (use_count) - { - oa->write_character(to_char_type('#')); - write_number_with_ubjson_prefix(j.m_value.array->size(), true); - } - - for (const auto& el : *j.m_value.array) - { - write_ubjson(el, use_count, use_type, prefix_required); - } - - if (not use_count) - { - oa->write_character(to_char_type(']')); - } - - break; - } - - case value_t::object: - { - if (add_prefix) - { - oa->write_character(to_char_type('{')); - } - - bool prefix_required = true; - if (use_type and not j.m_value.object->empty()) - { - assert(use_count); - const CharType first_prefix = ubjson_prefix(j.front()); - const bool same_prefix = std::all_of(j.begin(), j.end(), - [this, first_prefix](const BasicJsonType & v) - { - return ubjson_prefix(v) == first_prefix; - }); - - if (same_prefix) - { - prefix_required = false; - oa->write_character(to_char_type('$')); - oa->write_character(first_prefix); - } - } - - if (use_count) - { - oa->write_character(to_char_type('#')); - write_number_with_ubjson_prefix(j.m_value.object->size(), true); - } - - for (const auto& el : *j.m_value.object) - { - write_number_with_ubjson_prefix(el.first.size(), true); - oa->write_characters( - reinterpret_cast(el.first.c_str()), - el.first.size()); - write_ubjson(el.second, use_count, use_type, prefix_required); - } - - if (not use_count) - { - oa->write_character(to_char_type('}')); - } - - break; - } - - default: - break; - } - } - - private: - ////////// - // BSON // - ////////// - - /*! - @return The size of a BSON document entry header, including the id marker - and the entry name size (and its null-terminator). - */ - static std::size_t calc_bson_entry_header_size(const string_t& name) - { - const auto it = name.find(static_cast(0)); - if (JSON_UNLIKELY(it != BasicJsonType::string_t::npos)) - { - JSON_THROW(out_of_range::create(409, - "BSON key cannot contain code point U+0000 (at byte " + std::to_string(it) + ")")); - } - - return /*id*/ 1ul + name.size() + /*zero-terminator*/1u; - } - - /*! 
- @brief Writes the given @a element_type and @a name to the output adapter - */ - void write_bson_entry_header(const string_t& name, - const std::uint8_t element_type) - { - oa->write_character(to_char_type(element_type)); // boolean - oa->write_characters( - reinterpret_cast(name.c_str()), - name.size() + 1u); - } - - /*! - @brief Writes a BSON element with key @a name and boolean value @a value - */ - void write_bson_boolean(const string_t& name, - const bool value) - { - write_bson_entry_header(name, 0x08); - oa->write_character(value ? to_char_type(0x01) : to_char_type(0x00)); - } - - /*! - @brief Writes a BSON element with key @a name and double value @a value - */ - void write_bson_double(const string_t& name, - const double value) - { - write_bson_entry_header(name, 0x01); - write_number(value); - } - - /*! - @return The size of the BSON-encoded string in @a value - */ - static std::size_t calc_bson_string_size(const string_t& value) - { - return sizeof(std::int32_t) + value.size() + 1ul; - } - - /*! - @brief Writes a BSON element with key @a name and string value @a value - */ - void write_bson_string(const string_t& name, - const string_t& value) - { - write_bson_entry_header(name, 0x02); - - write_number(static_cast(value.size() + 1ul)); - oa->write_characters( - reinterpret_cast(value.c_str()), - value.size() + 1); - } - - /*! - @brief Writes a BSON element with key @a name and null value - */ - void write_bson_null(const string_t& name) - { - write_bson_entry_header(name, 0x0A); - } - - /*! - @return The size of the BSON-encoded integer @a value - */ - static std::size_t calc_bson_integer_size(const std::int64_t value) - { - if ((std::numeric_limits::min)() <= value and value <= (std::numeric_limits::max)()) - { - return sizeof(std::int32_t); - } - else - { - return sizeof(std::int64_t); - } - } - - /*! - @brief Writes a BSON element with key @a name and integer @a value - */ - void write_bson_integer(const string_t& name, - const std::int64_t value) - { - if ((std::numeric_limits::min)() <= value and value <= (std::numeric_limits::max)()) - { - write_bson_entry_header(name, 0x10); // int32 - write_number(static_cast(value)); - } - else - { - write_bson_entry_header(name, 0x12); // int64 - write_number(static_cast(value)); - } - } - - /*! - @return The size of the BSON-encoded unsigned integer in @a j - */ - static constexpr std::size_t calc_bson_unsigned_size(const std::uint64_t value) noexcept - { - return (value <= static_cast((std::numeric_limits::max)())) - ? sizeof(std::int32_t) - : sizeof(std::int64_t); - } - - /*! - @brief Writes a BSON element with key @a name and unsigned @a value - */ - void write_bson_unsigned(const string_t& name, - const std::uint64_t value) - { - if (value <= static_cast((std::numeric_limits::max)())) - { - write_bson_entry_header(name, 0x10 /* int32 */); - write_number(static_cast(value)); - } - else if (value <= static_cast((std::numeric_limits::max)())) - { - write_bson_entry_header(name, 0x12 /* int64 */); - write_number(static_cast(value)); - } - else - { - JSON_THROW(out_of_range::create(407, "integer number " + std::to_string(value) + " cannot be represented by BSON as it does not fit int64")); - } - } - - /*! - @brief Writes a BSON element with key @a name and object @a value - */ - void write_bson_object_entry(const string_t& name, - const typename BasicJsonType::object_t& value) - { - write_bson_entry_header(name, 0x03); // object - write_bson_object(value); - } - - /*! 
- @return The size of the BSON-encoded array @a value - */ - static std::size_t calc_bson_array_size(const typename BasicJsonType::array_t& value) - { - std::size_t embedded_document_size = 0ul; - std::size_t array_index = 0ul; - - for (const auto& el : value) - { - embedded_document_size += calc_bson_element_size(std::to_string(array_index++), el); - } - - return sizeof(std::int32_t) + embedded_document_size + 1ul; - } - - /*! - @brief Writes a BSON element with key @a name and array @a value - */ - void write_bson_array(const string_t& name, - const typename BasicJsonType::array_t& value) - { - write_bson_entry_header(name, 0x04); // array - write_number(static_cast(calc_bson_array_size(value))); - - std::size_t array_index = 0ul; - - for (const auto& el : value) - { - write_bson_element(std::to_string(array_index++), el); - } - - oa->write_character(to_char_type(0x00)); - } - - /*! - @brief Calculates the size necessary to serialize the JSON value @a j with its @a name - @return The calculated size for the BSON document entry for @a j with the given @a name. - */ - static std::size_t calc_bson_element_size(const string_t& name, - const BasicJsonType& j) - { - const auto header_size = calc_bson_entry_header_size(name); - switch (j.type()) - { - case value_t::object: - return header_size + calc_bson_object_size(*j.m_value.object); - - case value_t::array: - return header_size + calc_bson_array_size(*j.m_value.array); - - case value_t::boolean: - return header_size + 1ul; - - case value_t::number_float: - return header_size + 8ul; - - case value_t::number_integer: - return header_size + calc_bson_integer_size(j.m_value.number_integer); - - case value_t::number_unsigned: - return header_size + calc_bson_unsigned_size(j.m_value.number_unsigned); - - case value_t::string: - return header_size + calc_bson_string_size(*j.m_value.string); - - case value_t::null: - return header_size + 0ul; - - // LCOV_EXCL_START - default: - assert(false); - return 0ul; - // LCOV_EXCL_STOP - }; - } - - /*! - @brief Serializes the JSON value @a j to BSON and associates it with the - key @a name. - @param name The name to associate with the JSON entity @a j within the - current BSON document - @return The size of the BSON entry - */ - void write_bson_element(const string_t& name, - const BasicJsonType& j) - { - switch (j.type()) - { - case value_t::object: - return write_bson_object_entry(name, *j.m_value.object); - - case value_t::array: - return write_bson_array(name, *j.m_value.array); - - case value_t::boolean: - return write_bson_boolean(name, j.m_value.boolean); - - case value_t::number_float: - return write_bson_double(name, j.m_value.number_float); - - case value_t::number_integer: - return write_bson_integer(name, j.m_value.number_integer); - - case value_t::number_unsigned: - return write_bson_unsigned(name, j.m_value.number_unsigned); - - case value_t::string: - return write_bson_string(name, *j.m_value.string); - - case value_t::null: - return write_bson_null(name); - - // LCOV_EXCL_START - default: - assert(false); - return; - // LCOV_EXCL_STOP - }; - } - - /*! - @brief Calculates the size of the BSON serialization of the given - JSON-object @a j. 
- @param[in] j JSON value to serialize - @pre j.type() == value_t::object - */ - static std::size_t calc_bson_object_size(const typename BasicJsonType::object_t& value) - { - std::size_t document_size = std::accumulate(value.begin(), value.end(), 0ul, - [](size_t result, const typename BasicJsonType::object_t::value_type & el) - { - return result += calc_bson_element_size(el.first, el.second); - }); - - return sizeof(std::int32_t) + document_size + 1ul; - } - - /*! - @param[in] j JSON value to serialize - @pre j.type() == value_t::object - */ - void write_bson_object(const typename BasicJsonType::object_t& value) - { - write_number(static_cast(calc_bson_object_size(value))); - - for (const auto& el : value) - { - write_bson_element(el.first, el.second); - } - - oa->write_character(to_char_type(0x00)); - } - - ////////// - // CBOR // - ////////// - - static constexpr CharType get_cbor_float_prefix(float /*unused*/) - { - return to_char_type(0xFA); // Single-Precision Float - } - - static constexpr CharType get_cbor_float_prefix(double /*unused*/) - { - return to_char_type(0xFB); // Double-Precision Float - } - - ///////////// - // MsgPack // - ///////////// - - static constexpr CharType get_msgpack_float_prefix(float /*unused*/) - { - return to_char_type(0xCA); // float 32 - } - - static constexpr CharType get_msgpack_float_prefix(double /*unused*/) - { - return to_char_type(0xCB); // float 64 - } - - //////////// - // UBJSON // - //////////// - - // UBJSON: write number (floating point) - template::value, int>::type = 0> - void write_number_with_ubjson_prefix(const NumberType n, - const bool add_prefix) - { - if (add_prefix) - { - oa->write_character(get_ubjson_float_prefix(n)); - } - write_number(n); - } - - // UBJSON: write number (unsigned integer) - template::value, int>::type = 0> - void write_number_with_ubjson_prefix(const NumberType n, - const bool add_prefix) - { - if (n <= static_cast((std::numeric_limits::max)())) - { - if (add_prefix) - { - oa->write_character(to_char_type('i')); // int8 - } - write_number(static_cast(n)); - } - else if (n <= (std::numeric_limits::max)()) - { - if (add_prefix) - { - oa->write_character(to_char_type('U')); // uint8 - } - write_number(static_cast(n)); - } - else if (n <= static_cast((std::numeric_limits::max)())) - { - if (add_prefix) - { - oa->write_character(to_char_type('I')); // int16 - } - write_number(static_cast(n)); - } - else if (n <= static_cast((std::numeric_limits::max)())) - { - if (add_prefix) - { - oa->write_character(to_char_type('l')); // int32 - } - write_number(static_cast(n)); - } - else if (n <= static_cast((std::numeric_limits::max)())) - { - if (add_prefix) - { - oa->write_character(to_char_type('L')); // int64 - } - write_number(static_cast(n)); - } - else - { - JSON_THROW(out_of_range::create(407, "integer number " + std::to_string(n) + " cannot be represented by UBJSON as it does not fit int64")); - } - } - - // UBJSON: write number (signed integer) - template::value and - not std::is_floating_point::value, int>::type = 0> - void write_number_with_ubjson_prefix(const NumberType n, - const bool add_prefix) - { - if ((std::numeric_limits::min)() <= n and n <= (std::numeric_limits::max)()) - { - if (add_prefix) - { - oa->write_character(to_char_type('i')); // int8 - } - write_number(static_cast(n)); - } - else if (static_cast((std::numeric_limits::min)()) <= n and n <= static_cast((std::numeric_limits::max)())) - { - if (add_prefix) - { - oa->write_character(to_char_type('U')); // uint8 - } - write_number(static_cast(n)); - 
} - else if ((std::numeric_limits::min)() <= n and n <= (std::numeric_limits::max)()) - { - if (add_prefix) - { - oa->write_character(to_char_type('I')); // int16 - } - write_number(static_cast(n)); - } - else if ((std::numeric_limits::min)() <= n and n <= (std::numeric_limits::max)()) - { - if (add_prefix) - { - oa->write_character(to_char_type('l')); // int32 - } - write_number(static_cast(n)); - } - else if ((std::numeric_limits::min)() <= n and n <= (std::numeric_limits::max)()) - { - if (add_prefix) - { - oa->write_character(to_char_type('L')); // int64 - } - write_number(static_cast(n)); - } - // LCOV_EXCL_START - else - { - JSON_THROW(out_of_range::create(407, "integer number " + std::to_string(n) + " cannot be represented by UBJSON as it does not fit int64")); - } - // LCOV_EXCL_STOP - } - - /*! - @brief determine the type prefix of container values - - @note This function does not need to be 100% accurate when it comes to - integer limits. In case a number exceeds the limits of int64_t, - this will be detected by a later call to function - write_number_with_ubjson_prefix. Therefore, we return 'L' for any - value that does not fit the previous limits. - */ - CharType ubjson_prefix(const BasicJsonType& j) const noexcept - { - switch (j.type()) - { - case value_t::null: - return 'Z'; - - case value_t::boolean: - return j.m_value.boolean ? 'T' : 'F'; - - case value_t::number_integer: - { - if ((std::numeric_limits::min)() <= j.m_value.number_integer and j.m_value.number_integer <= (std::numeric_limits::max)()) - { - return 'i'; - } - if ((std::numeric_limits::min)() <= j.m_value.number_integer and j.m_value.number_integer <= (std::numeric_limits::max)()) - { - return 'U'; - } - if ((std::numeric_limits::min)() <= j.m_value.number_integer and j.m_value.number_integer <= (std::numeric_limits::max)()) - { - return 'I'; - } - if ((std::numeric_limits::min)() <= j.m_value.number_integer and j.m_value.number_integer <= (std::numeric_limits::max)()) - { - return 'l'; - } - // no check and assume int64_t (see note above) - return 'L'; - } - - case value_t::number_unsigned: - { - if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - return 'i'; - } - if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - return 'U'; - } - if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - return 'I'; - } - if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - return 'l'; - } - // no check and assume int64_t (see note above) - return 'L'; - } - - case value_t::number_float: - return get_ubjson_float_prefix(j.m_value.number_float); - - case value_t::string: - return 'S'; - - case value_t::array: - return '['; - - case value_t::object: - return '{'; - - default: // discarded values - return 'N'; - } - } - - static constexpr CharType get_ubjson_float_prefix(float /*unused*/) - { - return 'd'; // float 32 - } - - static constexpr CharType get_ubjson_float_prefix(double /*unused*/) - { - return 'D'; // float 64 - } - - /////////////////////// - // Utility functions // - /////////////////////// - - /* - @brief write a number to output input - @param[in] n number of type @a NumberType - @tparam NumberType the type of the number - @tparam OutputIsLittleEndian Set to true if output data is - required to be little endian - - @note This function needs to respect the system's endianess, because bytes - in CBOR, MessagePack, and UBJSON are stored in network order (big - endian) and therefore need reordering on little endian systems. 
- */ - template - void write_number(const NumberType n) - { - // step 1: write number to array of length NumberType - std::array vec; - std::memcpy(vec.data(), &n, sizeof(NumberType)); - - // step 2: write array to output (with possible reordering) - if (is_little_endian and not OutputIsLittleEndian) - { - // reverse byte order prior to conversion if necessary - std::reverse(vec.begin(), vec.end()); - } - - oa->write_characters(vec.data(), sizeof(NumberType)); - } - - public: - // The following to_char_type functions are implement the conversion - // between uint8_t and CharType. In case CharType is not unsigned, - // such a conversion is required to allow values greater than 128. - // See for a discussion. - template < typename C = CharType, - enable_if_t < std::is_signed::value and std::is_signed::value > * = nullptr > - static constexpr CharType to_char_type(std::uint8_t x) noexcept - { - return *reinterpret_cast(&x); - } - - template < typename C = CharType, - enable_if_t < std::is_signed::value and std::is_unsigned::value > * = nullptr > - static CharType to_char_type(std::uint8_t x) noexcept - { - static_assert(sizeof(std::uint8_t) == sizeof(CharType), "size of CharType must be equal to std::uint8_t"); - static_assert(std::is_pod::value, "CharType must be POD"); - CharType result; - std::memcpy(&result, &x, sizeof(x)); - return result; - } - - template::value>* = nullptr> - static constexpr CharType to_char_type(std::uint8_t x) noexcept - { - return x; - } - - template < typename InputCharType, typename C = CharType, - enable_if_t < - std::is_signed::value and - std::is_signed::value and - std::is_same::type>::value - > * = nullptr > - static constexpr CharType to_char_type(InputCharType x) noexcept - { - return x; - } - - private: - /// whether we can assume little endianess - const bool is_little_endian = binary_reader::little_endianess(); - - /// the output - output_adapter_t oa = nullptr; -}; -} // namespace detail -} // namespace nlohmann - -// #include - - -#include // reverse, remove, fill, find, none_of -#include // array -#include // assert -#include // and, or -#include // localeconv, lconv -#include // labs, isfinite, isnan, signbit -#include // size_t, ptrdiff_t -#include // uint8_t -#include // snprintf -#include // numeric_limits -#include // string -#include // is_same - -// #include - -// #include - - -#include // assert -#include // or, and, not -#include // signbit, isfinite -#include // intN_t, uintN_t -#include // memcpy, memmove - -namespace nlohmann -{ -namespace detail -{ - -/*! -@brief implements the Grisu2 algorithm for binary to decimal floating-point -conversion. - -This implementation is a slightly modified version of the reference -implementation which may be obtained from -http://florian.loitsch.com/publications (bench.tar.gz). - -The code is distributed under the MIT license, Copyright (c) 2009 Florian Loitsch. 
- -For a detailed description of the algorithm see: - -[1] Loitsch, "Printing Floating-Point Numbers Quickly and Accurately with - Integers", Proceedings of the ACM SIGPLAN 2010 Conference on Programming - Language Design and Implementation, PLDI 2010 -[2] Burger, Dybvig, "Printing Floating-Point Numbers Quickly and Accurately", - Proceedings of the ACM SIGPLAN 1996 Conference on Programming Language - Design and Implementation, PLDI 1996 -*/ -namespace dtoa_impl -{ - -template -Target reinterpret_bits(const Source source) -{ - static_assert(sizeof(Target) == sizeof(Source), "size mismatch"); - - Target target; - std::memcpy(&target, &source, sizeof(Source)); - return target; -} - -struct diyfp // f * 2^e -{ - static constexpr int kPrecision = 64; // = q - - uint64_t f = 0; - int e = 0; - - constexpr diyfp(uint64_t f_, int e_) noexcept : f(f_), e(e_) {} - - /*! - @brief returns x - y - @pre x.e == y.e and x.f >= y.f - */ - static diyfp sub(const diyfp& x, const diyfp& y) noexcept - { - assert(x.e == y.e); - assert(x.f >= y.f); - - return {x.f - y.f, x.e}; - } - - /*! - @brief returns x * y - @note The result is rounded. (Only the upper q bits are returned.) - */ - static diyfp mul(const diyfp& x, const diyfp& y) noexcept - { - static_assert(kPrecision == 64, "internal error"); - - // Computes: - // f = round((x.f * y.f) / 2^q) - // e = x.e + y.e + q - - // Emulate the 64-bit * 64-bit multiplication: - // - // p = u * v - // = (u_lo + 2^32 u_hi) (v_lo + 2^32 v_hi) - // = (u_lo v_lo ) + 2^32 ((u_lo v_hi ) + (u_hi v_lo )) + 2^64 (u_hi v_hi ) - // = (p0 ) + 2^32 ((p1 ) + (p2 )) + 2^64 (p3 ) - // = (p0_lo + 2^32 p0_hi) + 2^32 ((p1_lo + 2^32 p1_hi) + (p2_lo + 2^32 p2_hi)) + 2^64 (p3 ) - // = (p0_lo ) + 2^32 (p0_hi + p1_lo + p2_lo ) + 2^64 (p1_hi + p2_hi + p3) - // = (p0_lo ) + 2^32 (Q ) + 2^64 (H ) - // = (p0_lo ) + 2^32 (Q_lo + 2^32 Q_hi ) + 2^64 (H ) - // - // (Since Q might be larger than 2^32 - 1) - // - // = (p0_lo + 2^32 Q_lo) + 2^64 (Q_hi + H) - // - // (Q_hi + H does not overflow a 64-bit int) - // - // = p_lo + 2^64 p_hi - - const uint64_t u_lo = x.f & 0xFFFFFFFF; - const uint64_t u_hi = x.f >> 32; - const uint64_t v_lo = y.f & 0xFFFFFFFF; - const uint64_t v_hi = y.f >> 32; - - const uint64_t p0 = u_lo * v_lo; - const uint64_t p1 = u_lo * v_hi; - const uint64_t p2 = u_hi * v_lo; - const uint64_t p3 = u_hi * v_hi; - - const uint64_t p0_hi = p0 >> 32; - const uint64_t p1_lo = p1 & 0xFFFFFFFF; - const uint64_t p1_hi = p1 >> 32; - const uint64_t p2_lo = p2 & 0xFFFFFFFF; - const uint64_t p2_hi = p2 >> 32; - - uint64_t Q = p0_hi + p1_lo + p2_lo; - - // The full product might now be computed as - // - // p_hi = p3 + p2_hi + p1_hi + (Q >> 32) - // p_lo = p0_lo + (Q << 32) - // - // But in this particular case here, the full p_lo is not required. - // Effectively we only need to add the highest bit in p_lo to p_hi (and - // Q_hi + 1 does not overflow). - - Q += uint64_t{1} << (64 - 32 - 1); // round, ties up - - const uint64_t h = p3 + p2_hi + p1_hi + (Q >> 32); - - return {h, x.e + y.e + 64}; - } - - /*! - @brief normalize x such that the significand is >= 2^(q-1) - @pre x.f != 0 - */ - static diyfp normalize(diyfp x) noexcept - { - assert(x.f != 0); - - while ((x.f >> 63) == 0) - { - x.f <<= 1; - x.e--; - } - - return x; - } - - /*! - @brief normalize x such that the result has the exponent E - @pre e >= x.e and the upper e - x.e bits of x.f must be zero. 
- */ - static diyfp normalize_to(const diyfp& x, const int target_exponent) noexcept - { - const int delta = x.e - target_exponent; - - assert(delta >= 0); - assert(((x.f << delta) >> delta) == x.f); - - return {x.f << delta, target_exponent}; - } -}; - -struct boundaries -{ - diyfp w; - diyfp minus; - diyfp plus; -}; - -/*! -Compute the (normalized) diyfp representing the input number 'value' and its -boundaries. - -@pre value must be finite and positive -*/ -template -boundaries compute_boundaries(FloatType value) -{ - assert(std::isfinite(value)); - assert(value > 0); - - // Convert the IEEE representation into a diyfp. - // - // If v is denormal: - // value = 0.F * 2^(1 - bias) = ( F) * 2^(1 - bias - (p-1)) - // If v is normalized: - // value = 1.F * 2^(E - bias) = (2^(p-1) + F) * 2^(E - bias - (p-1)) - - static_assert(std::numeric_limits::is_iec559, - "internal error: dtoa_short requires an IEEE-754 floating-point implementation"); - - constexpr int kPrecision = std::numeric_limits::digits; // = p (includes the hidden bit) - constexpr int kBias = std::numeric_limits::max_exponent - 1 + (kPrecision - 1); - constexpr int kMinExp = 1 - kBias; - constexpr uint64_t kHiddenBit = uint64_t{1} << (kPrecision - 1); // = 2^(p-1) - - using bits_type = typename std::conditional< kPrecision == 24, uint32_t, uint64_t >::type; - - const uint64_t bits = reinterpret_bits(value); - const uint64_t E = bits >> (kPrecision - 1); - const uint64_t F = bits & (kHiddenBit - 1); - - const bool is_denormal = (E == 0); - const diyfp v = is_denormal - ? diyfp(F, kMinExp) - : diyfp(F + kHiddenBit, static_cast(E) - kBias); - - // Compute the boundaries m- and m+ of the floating-point value - // v = f * 2^e. - // - // Determine v- and v+, the floating-point predecessor and successor if v, - // respectively. - // - // v- = v - 2^e if f != 2^(p-1) or e == e_min (A) - // = v - 2^(e-1) if f == 2^(p-1) and e > e_min (B) - // - // v+ = v + 2^e - // - // Let m- = (v- + v) / 2 and m+ = (v + v+) / 2. All real numbers _strictly_ - // between m- and m+ round to v, regardless of how the input rounding - // algorithm breaks ties. - // - // ---+-------------+-------------+-------------+-------------+--- (A) - // v- m- v m+ v+ - // - // -----------------+------+------+-------------+-------------+--- (B) - // v- m- v m+ v+ - - const bool lower_boundary_is_closer = (F == 0 and E > 1); - const diyfp m_plus = diyfp(2 * v.f + 1, v.e - 1); - const diyfp m_minus = lower_boundary_is_closer - ? diyfp(4 * v.f - 1, v.e - 2) // (B) - : diyfp(2 * v.f - 1, v.e - 1); // (A) - - // Determine the normalized w+ = m+. - const diyfp w_plus = diyfp::normalize(m_plus); - - // Determine w- = m- such that e_(w-) = e_(w+). - const diyfp w_minus = diyfp::normalize_to(m_minus, w_plus.e); - - return {diyfp::normalize(v), w_minus, w_plus}; -} - -// Given normalized diyfp w, Grisu needs to find a (normalized) cached -// power-of-ten c, such that the exponent of the product c * w = f * 2^e lies -// within a certain range [alpha, gamma] (Definition 3.2 from [1]) -// -// alpha <= e = e_c + e_w + q <= gamma -// -// or -// -// f_c * f_w * 2^alpha <= f_c 2^(e_c) * f_w 2^(e_w) * 2^q -// <= f_c * f_w * 2^gamma -// -// Since c and w are normalized, i.e. 2^(q-1) <= f < 2^q, this implies -// -// 2^(q-1) * 2^(q-1) * 2^alpha <= c * w * 2^q < 2^q * 2^q * 2^gamma -// -// or -// -// 2^(q - 2 + alpha) <= c * w < 2^(q + gamma) -// -// The choice of (alpha,gamma) determines the size of the table and the form of -// the digit generation procedure. 
Using (alpha,gamma)=(-60,-32) works out well -// in practice: -// -// The idea is to cut the number c * w = f * 2^e into two parts, which can be -// processed independently: An integral part p1, and a fractional part p2: -// -// f * 2^e = ( (f div 2^-e) * 2^-e + (f mod 2^-e) ) * 2^e -// = (f div 2^-e) + (f mod 2^-e) * 2^e -// = p1 + p2 * 2^e -// -// The conversion of p1 into decimal form requires a series of divisions and -// modulos by (a power of) 10. These operations are faster for 32-bit than for -// 64-bit integers, so p1 should ideally fit into a 32-bit integer. This can be -// achieved by choosing -// -// -e >= 32 or e <= -32 := gamma -// -// In order to convert the fractional part -// -// p2 * 2^e = p2 / 2^-e = d[-1] / 10^1 + d[-2] / 10^2 + ... -// -// into decimal form, the fraction is repeatedly multiplied by 10 and the digits -// d[-i] are extracted in order: -// -// (10 * p2) div 2^-e = d[-1] -// (10 * p2) mod 2^-e = d[-2] / 10^1 + ... -// -// The multiplication by 10 must not overflow. It is sufficient to choose -// -// 10 * p2 < 16 * p2 = 2^4 * p2 <= 2^64. -// -// Since p2 = f mod 2^-e < 2^-e, -// -// -e <= 60 or e >= -60 := alpha - -constexpr int kAlpha = -60; -constexpr int kGamma = -32; - -struct cached_power // c = f * 2^e ~= 10^k -{ - uint64_t f; - int e; - int k; -}; - -/*! -For a normalized diyfp w = f * 2^e, this function returns a (normalized) cached -power-of-ten c = f_c * 2^e_c, such that the exponent of the product w * c -satisfies (Definition 3.2 from [1]) - - alpha <= e_c + e + q <= gamma. -*/ -inline cached_power get_cached_power_for_binary_exponent(int e) -{ - // Now - // - // alpha <= e_c + e + q <= gamma (1) - // ==> f_c * 2^alpha <= c * 2^e * 2^q - // - // and since the c's are normalized, 2^(q-1) <= f_c, - // - // ==> 2^(q - 1 + alpha) <= c * 2^(e + q) - // ==> 2^(alpha - e - 1) <= c - // - // If c were an exakt power of ten, i.e. c = 10^k, one may determine k as - // - // k = ceil( log_10( 2^(alpha - e - 1) ) ) - // = ceil( (alpha - e - 1) * log_10(2) ) - // - // From the paper: - // "In theory the result of the procedure could be wrong since c is rounded, - // and the computation itself is approximated [...]. In practice, however, - // this simple function is sufficient." - // - // For IEEE double precision floating-point numbers converted into - // normalized diyfp's w = f * 2^e, with q = 64, - // - // e >= -1022 (min IEEE exponent) - // -52 (p - 1) - // -52 (p - 1, possibly normalize denormal IEEE numbers) - // -11 (normalize the diyfp) - // = -1137 - // - // and - // - // e <= +1023 (max IEEE exponent) - // -52 (p - 1) - // -11 (normalize the diyfp) - // = 960 - // - // This binary exponent range [-1137,960] results in a decimal exponent - // range [-307,324]. One does not need to store a cached power for each - // k in this range. For each such k it suffices to find a cached power - // such that the exponent of the product lies in [alpha,gamma]. - // This implies that the difference of the decimal exponents of adjacent - // table entries must be less than or equal to - // - // floor( (gamma - alpha) * log_10(2) ) = 8. - // - // (A smaller distance gamma-alpha would require a larger table.) - - // NB: - // Actually this function returns c, such that -60 <= e_c + e + 64 <= -34. 
- - constexpr int kCachedPowersSize = 79; - constexpr int kCachedPowersMinDecExp = -300; - constexpr int kCachedPowersDecStep = 8; - - static constexpr cached_power kCachedPowers[] = - { - { 0xAB70FE17C79AC6CA, -1060, -300 }, - { 0xFF77B1FCBEBCDC4F, -1034, -292 }, - { 0xBE5691EF416BD60C, -1007, -284 }, - { 0x8DD01FAD907FFC3C, -980, -276 }, - { 0xD3515C2831559A83, -954, -268 }, - { 0x9D71AC8FADA6C9B5, -927, -260 }, - { 0xEA9C227723EE8BCB, -901, -252 }, - { 0xAECC49914078536D, -874, -244 }, - { 0x823C12795DB6CE57, -847, -236 }, - { 0xC21094364DFB5637, -821, -228 }, - { 0x9096EA6F3848984F, -794, -220 }, - { 0xD77485CB25823AC7, -768, -212 }, - { 0xA086CFCD97BF97F4, -741, -204 }, - { 0xEF340A98172AACE5, -715, -196 }, - { 0xB23867FB2A35B28E, -688, -188 }, - { 0x84C8D4DFD2C63F3B, -661, -180 }, - { 0xC5DD44271AD3CDBA, -635, -172 }, - { 0x936B9FCEBB25C996, -608, -164 }, - { 0xDBAC6C247D62A584, -582, -156 }, - { 0xA3AB66580D5FDAF6, -555, -148 }, - { 0xF3E2F893DEC3F126, -529, -140 }, - { 0xB5B5ADA8AAFF80B8, -502, -132 }, - { 0x87625F056C7C4A8B, -475, -124 }, - { 0xC9BCFF6034C13053, -449, -116 }, - { 0x964E858C91BA2655, -422, -108 }, - { 0xDFF9772470297EBD, -396, -100 }, - { 0xA6DFBD9FB8E5B88F, -369, -92 }, - { 0xF8A95FCF88747D94, -343, -84 }, - { 0xB94470938FA89BCF, -316, -76 }, - { 0x8A08F0F8BF0F156B, -289, -68 }, - { 0xCDB02555653131B6, -263, -60 }, - { 0x993FE2C6D07B7FAC, -236, -52 }, - { 0xE45C10C42A2B3B06, -210, -44 }, - { 0xAA242499697392D3, -183, -36 }, - { 0xFD87B5F28300CA0E, -157, -28 }, - { 0xBCE5086492111AEB, -130, -20 }, - { 0x8CBCCC096F5088CC, -103, -12 }, - { 0xD1B71758E219652C, -77, -4 }, - { 0x9C40000000000000, -50, 4 }, - { 0xE8D4A51000000000, -24, 12 }, - { 0xAD78EBC5AC620000, 3, 20 }, - { 0x813F3978F8940984, 30, 28 }, - { 0xC097CE7BC90715B3, 56, 36 }, - { 0x8F7E32CE7BEA5C70, 83, 44 }, - { 0xD5D238A4ABE98068, 109, 52 }, - { 0x9F4F2726179A2245, 136, 60 }, - { 0xED63A231D4C4FB27, 162, 68 }, - { 0xB0DE65388CC8ADA8, 189, 76 }, - { 0x83C7088E1AAB65DB, 216, 84 }, - { 0xC45D1DF942711D9A, 242, 92 }, - { 0x924D692CA61BE758, 269, 100 }, - { 0xDA01EE641A708DEA, 295, 108 }, - { 0xA26DA3999AEF774A, 322, 116 }, - { 0xF209787BB47D6B85, 348, 124 }, - { 0xB454E4A179DD1877, 375, 132 }, - { 0x865B86925B9BC5C2, 402, 140 }, - { 0xC83553C5C8965D3D, 428, 148 }, - { 0x952AB45CFA97A0B3, 455, 156 }, - { 0xDE469FBD99A05FE3, 481, 164 }, - { 0xA59BC234DB398C25, 508, 172 }, - { 0xF6C69A72A3989F5C, 534, 180 }, - { 0xB7DCBF5354E9BECE, 561, 188 }, - { 0x88FCF317F22241E2, 588, 196 }, - { 0xCC20CE9BD35C78A5, 614, 204 }, - { 0x98165AF37B2153DF, 641, 212 }, - { 0xE2A0B5DC971F303A, 667, 220 }, - { 0xA8D9D1535CE3B396, 694, 228 }, - { 0xFB9B7CD9A4A7443C, 720, 236 }, - { 0xBB764C4CA7A44410, 747, 244 }, - { 0x8BAB8EEFB6409C1A, 774, 252 }, - { 0xD01FEF10A657842C, 800, 260 }, - { 0x9B10A4E5E9913129, 827, 268 }, - { 0xE7109BFBA19C0C9D, 853, 276 }, - { 0xAC2820D9623BF429, 880, 284 }, - { 0x80444B5E7AA7CF85, 907, 292 }, - { 0xBF21E44003ACDD2D, 933, 300 }, - { 0x8E679C2F5E44FF8F, 960, 308 }, - { 0xD433179D9C8CB841, 986, 316 }, - { 0x9E19DB92B4E31BA9, 1013, 324 }, - }; - - // This computation gives exactly the same results for k as - // k = ceil((kAlpha - e - 1) * 0.30102999566398114) - // for |e| <= 1500, but doesn't require floating-point operations. 
- // NB: log_10(2) ~= 78913 / 2^18 - assert(e >= -1500); - assert(e <= 1500); - const int f = kAlpha - e - 1; - const int k = (f * 78913) / (1 << 18) + static_cast(f > 0); - - const int index = (-kCachedPowersMinDecExp + k + (kCachedPowersDecStep - 1)) / kCachedPowersDecStep; - assert(index >= 0); - assert(index < kCachedPowersSize); - static_cast(kCachedPowersSize); // Fix warning. - - const cached_power cached = kCachedPowers[index]; - assert(kAlpha <= cached.e + e + 64); - assert(kGamma >= cached.e + e + 64); - - return cached; -} - -/*! -For n != 0, returns k, such that pow10 := 10^(k-1) <= n < 10^k. -For n == 0, returns 1 and sets pow10 := 1. -*/ -inline int find_largest_pow10(const uint32_t n, uint32_t& pow10) -{ - // LCOV_EXCL_START - if (n >= 1000000000) - { - pow10 = 1000000000; - return 10; - } - // LCOV_EXCL_STOP - else if (n >= 100000000) - { - pow10 = 100000000; - return 9; - } - else if (n >= 10000000) - { - pow10 = 10000000; - return 8; - } - else if (n >= 1000000) - { - pow10 = 1000000; - return 7; - } - else if (n >= 100000) - { - pow10 = 100000; - return 6; - } - else if (n >= 10000) - { - pow10 = 10000; - return 5; - } - else if (n >= 1000) - { - pow10 = 1000; - return 4; - } - else if (n >= 100) - { - pow10 = 100; - return 3; - } - else if (n >= 10) - { - pow10 = 10; - return 2; - } - else - { - pow10 = 1; - return 1; - } -} - -inline void grisu2_round(char* buf, int len, uint64_t dist, uint64_t delta, - uint64_t rest, uint64_t ten_k) -{ - assert(len >= 1); - assert(dist <= delta); - assert(rest <= delta); - assert(ten_k > 0); - - // <--------------------------- delta ----> - // <---- dist ---------> - // --------------[------------------+-------------------]-------------- - // M- w M+ - // - // ten_k - // <------> - // <---- rest ----> - // --------------[------------------+----+--------------]-------------- - // w V - // = buf * 10^k - // - // ten_k represents a unit-in-the-last-place in the decimal representation - // stored in buf. - // Decrement buf by ten_k while this takes buf closer to w. - - // The tests are written in this order to avoid overflow in unsigned - // integer arithmetic. - - while (rest < dist - and delta - rest >= ten_k - and (rest + ten_k < dist or dist - rest > rest + ten_k - dist)) - { - assert(buf[len - 1] != '0'); - buf[len - 1]--; - rest += ten_k; - } -} - -/*! -Generates V = buffer * 10^decimal_exponent, such that M- <= V <= M+. -M- and M+ must be normalized and share the same exponent -60 <= e <= -32. -*/ -inline void grisu2_digit_gen(char* buffer, int& length, int& decimal_exponent, - diyfp M_minus, diyfp w, diyfp M_plus) -{ - static_assert(kAlpha >= -60, "internal error"); - static_assert(kGamma <= -32, "internal error"); - - // Generates the digits (and the exponent) of a decimal floating-point - // number V = buffer * 10^decimal_exponent in the range [M-, M+]. The diyfp's - // w, M- and M+ share the same exponent e, which satisfies alpha <= e <= gamma. - // - // <--------------------------- delta ----> - // <---- dist ---------> - // --------------[------------------+-------------------]-------------- - // M- w M+ - // - // Grisu2 generates the digits of M+ from left to right and stops as soon as - // V is in [M-,M+]. 
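Looking back at get_cached_power_for_binary_exponent above: the integer expression (f * 78913) / 2^18 + (f > 0) stands in for ceil((kAlpha - e - 1) * log10(2)). A small self-contained check of that claimed equivalence over the documented range, assuming only the kAlpha = -60 constant defined earlier (the check itself is illustrative, not part of json.hpp):

#include <cassert>
#include <cmath>

// Illustrative sketch: verify that the integer approximation of
// ceil((kAlpha - e - 1) * log10(2)) agrees with the floating-point
// computation for every binary exponent with |e| <= 1500.
int main()
{
    constexpr int kAlpha = -60;
    for (int e = -1500; e <= 1500; ++e)
    {
        const int f = kAlpha - e - 1;
        const int k_int = (f * 78913) / (1 << 18) + static_cast<int>(f > 0);
        const int k_dbl = static_cast<int>(std::ceil(f * 0.30102999566398114));
        assert(k_int == k_dbl); // holds per the comment above; this just exercises it
    }
}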
- - assert(M_plus.e >= kAlpha); - assert(M_plus.e <= kGamma); - - uint64_t delta = diyfp::sub(M_plus, M_minus).f; // (significand of (M+ - M-), implicit exponent is e) - uint64_t dist = diyfp::sub(M_plus, w ).f; // (significand of (M+ - w ), implicit exponent is e) - - // Split M+ = f * 2^e into two parts p1 and p2 (note: e < 0): - // - // M+ = f * 2^e - // = ((f div 2^-e) * 2^-e + (f mod 2^-e)) * 2^e - // = ((p1 ) * 2^-e + (p2 )) * 2^e - // = p1 + p2 * 2^e - - const diyfp one(uint64_t{1} << -M_plus.e, M_plus.e); - - auto p1 = static_cast(M_plus.f >> -one.e); // p1 = f div 2^-e (Since -e >= 32, p1 fits into a 32-bit int.) - uint64_t p2 = M_plus.f & (one.f - 1); // p2 = f mod 2^-e - - // 1) - // - // Generate the digits of the integral part p1 = d[n-1]...d[1]d[0] - - assert(p1 > 0); - - uint32_t pow10; - const int k = find_largest_pow10(p1, pow10); - - // 10^(k-1) <= p1 < 10^k, pow10 = 10^(k-1) - // - // p1 = (p1 div 10^(k-1)) * 10^(k-1) + (p1 mod 10^(k-1)) - // = (d[k-1] ) * 10^(k-1) + (p1 mod 10^(k-1)) - // - // M+ = p1 + p2 * 2^e - // = d[k-1] * 10^(k-1) + (p1 mod 10^(k-1)) + p2 * 2^e - // = d[k-1] * 10^(k-1) + ((p1 mod 10^(k-1)) * 2^-e + p2) * 2^e - // = d[k-1] * 10^(k-1) + ( rest) * 2^e - // - // Now generate the digits d[n] of p1 from left to right (n = k-1,...,0) - // - // p1 = d[k-1]...d[n] * 10^n + d[n-1]...d[0] - // - // but stop as soon as - // - // rest * 2^e = (d[n-1]...d[0] * 2^-e + p2) * 2^e <= delta * 2^e - - int n = k; - while (n > 0) - { - // Invariants: - // M+ = buffer * 10^n + (p1 + p2 * 2^e) (buffer = 0 for n = k) - // pow10 = 10^(n-1) <= p1 < 10^n - // - const uint32_t d = p1 / pow10; // d = p1 div 10^(n-1) - const uint32_t r = p1 % pow10; // r = p1 mod 10^(n-1) - // - // M+ = buffer * 10^n + (d * 10^(n-1) + r) + p2 * 2^e - // = (buffer * 10 + d) * 10^(n-1) + (r + p2 * 2^e) - // - assert(d <= 9); - buffer[length++] = static_cast('0' + d); // buffer := buffer * 10 + d - // - // M+ = buffer * 10^(n-1) + (r + p2 * 2^e) - // - p1 = r; - n--; - // - // M+ = buffer * 10^n + (p1 + p2 * 2^e) - // pow10 = 10^n - // - - // Now check if enough digits have been generated. - // Compute - // - // p1 + p2 * 2^e = (p1 * 2^-e + p2) * 2^e = rest * 2^e - // - // Note: - // Since rest and delta share the same exponent e, it suffices to - // compare the significands. - const uint64_t rest = (uint64_t{p1} << -one.e) + p2; - if (rest <= delta) - { - // V = buffer * 10^n, with M- <= V <= M+. - - decimal_exponent += n; - - // We may now just stop. But instead look if the buffer could be - // decremented to bring V closer to w. - // - // pow10 = 10^n is now 1 ulp in the decimal representation V. - // The rounding procedure works with diyfp's with an implicit - // exponent of e. - // - // 10^n = (10^n * 2^-e) * 2^e = ulp * 2^e - // - const uint64_t ten_n = uint64_t{pow10} << -one.e; - grisu2_round(buffer, length, dist, delta, rest, ten_n); - - return; - } - - pow10 /= 10; - // - // pow10 = 10^(n-1) <= p1 < 10^n - // Invariants restored. - } - - // 2) - // - // The digits of the integral part have been generated: - // - // M+ = d[k-1]...d[1]d[0] + p2 * 2^e - // = buffer + p2 * 2^e - // - // Now generate the digits of the fractional part p2 * 2^e. - // - // Note: - // No decimal point is generated: the exponent is adjusted instead. - // - // p2 actually represents the fraction - // - // p2 * 2^e - // = p2 / 2^-e - // = d[-1] / 10^1 + d[-2] / 10^2 + ... - // - // Now generate the digits d[-m] of p1 from left to right (m = 1,2,...) 
- // - // p2 * 2^e = d[-1]d[-2]...d[-m] * 10^-m - // + 10^-m * (d[-m-1] / 10^1 + d[-m-2] / 10^2 + ...) - // - // using - // - // 10^m * p2 = ((10^m * p2) div 2^-e) * 2^-e + ((10^m * p2) mod 2^-e) - // = ( d) * 2^-e + ( r) - // - // or - // 10^m * p2 * 2^e = d + r * 2^e - // - // i.e. - // - // M+ = buffer + p2 * 2^e - // = buffer + 10^-m * (d + r * 2^e) - // = (buffer * 10^m + d) * 10^-m + 10^-m * r * 2^e - // - // and stop as soon as 10^-m * r * 2^e <= delta * 2^e - - assert(p2 > delta); - - int m = 0; - for (;;) - { - // Invariant: - // M+ = buffer * 10^-m + 10^-m * (d[-m-1] / 10 + d[-m-2] / 10^2 + ...) * 2^e - // = buffer * 10^-m + 10^-m * (p2 ) * 2^e - // = buffer * 10^-m + 10^-m * (1/10 * (10 * p2) ) * 2^e - // = buffer * 10^-m + 10^-m * (1/10 * ((10*p2 div 2^-e) * 2^-e + (10*p2 mod 2^-e)) * 2^e - // - assert(p2 <= UINT64_MAX / 10); - p2 *= 10; - const uint64_t d = p2 >> -one.e; // d = (10 * p2) div 2^-e - const uint64_t r = p2 & (one.f - 1); // r = (10 * p2) mod 2^-e - // - // M+ = buffer * 10^-m + 10^-m * (1/10 * (d * 2^-e + r) * 2^e - // = buffer * 10^-m + 10^-m * (1/10 * (d + r * 2^e)) - // = (buffer * 10 + d) * 10^(-m-1) + 10^(-m-1) * r * 2^e - // - assert(d <= 9); - buffer[length++] = static_cast('0' + d); // buffer := buffer * 10 + d - // - // M+ = buffer * 10^(-m-1) + 10^(-m-1) * r * 2^e - // - p2 = r; - m++; - // - // M+ = buffer * 10^-m + 10^-m * p2 * 2^e - // Invariant restored. - - // Check if enough digits have been generated. - // - // 10^-m * p2 * 2^e <= delta * 2^e - // p2 * 2^e <= 10^m * delta * 2^e - // p2 <= 10^m * delta - delta *= 10; - dist *= 10; - if (p2 <= delta) - { - break; - } - } - - // V = buffer * 10^-m, with M- <= V <= M+. - - decimal_exponent -= m; - - // 1 ulp in the decimal representation is now 10^-m. - // Since delta and dist are now scaled by 10^m, we need to do the - // same with ulp in order to keep the units in sync. - // - // 10^m * 10^-m = 1 = 2^-e * 2^e = ten_m * 2^e - // - const uint64_t ten_m = one.f; - grisu2_round(buffer, length, dist, delta, p2, ten_m); - - // By construction this algorithm generates the shortest possible decimal - // number (Loitsch, Theorem 6.2) which rounds back to w. - // For an input number of precision p, at least - // - // N = 1 + ceil(p * log_10(2)) - // - // decimal digits are sufficient to identify all binary floating-point - // numbers (Matula, "In-and-Out conversions"). - // This implies that the algorithm does not produce more than N decimal - // digits. - // - // N = 17 for p = 53 (IEEE double precision) - // N = 9 for p = 24 (IEEE single precision) -} - -/*! -v = buf * 10^decimal_exponent -len is the length of the buffer (number of decimal digits) -The buffer must be large enough, i.e. >= max_digits10. -*/ -inline void grisu2(char* buf, int& len, int& decimal_exponent, - diyfp m_minus, diyfp v, diyfp m_plus) -{ - assert(m_plus.e == m_minus.e); - assert(m_plus.e == v.e); - - // --------(-----------------------+-----------------------)-------- (A) - // m- v m+ - // - // --------------------(-----------+-----------------------)-------- (B) - // m- v m+ - // - // First scale v (and m- and m+) such that the exponent is in the range - // [alpha, gamma]. 
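The comments above cite Loitsch's Theorem 6.2: grisu2 emits the shortest decimal digit string that still rounds back to the original double. For intuition only, that property can also be demonstrated by brute force with increasing precision; grisu2 obtains it directly from the boundaries M- and M+ without any re-parsing. The helper below is a hypothetical illustration, not the code path used here:

#include <cstdio>
#include <cstdlib>
#include <string>

// Illustrative sketch: find the shortest "%g"-style string that parses back to
// exactly the same (finite) double, the guarantee grisu2 provides much faster.
std::string shortest_roundtrip(double value)
{
    char buf[64];
    for (int prec = 1; prec <= 17; ++prec)   // 17 = max_digits10 for IEEE double
    {
        std::snprintf(buf, sizeof(buf), "%.*g", prec, value);
        if (std::strtod(buf, nullptr) == value)
            break;                            // first precision that round-trips
    }
    return buf;
}
// Example: shortest_roundtrip(0.3) yields "0.3",
// while shortest_roundtrip(0.1 + 0.2) yields "0.30000000000000004".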
- - const cached_power cached = get_cached_power_for_binary_exponent(m_plus.e); - - const diyfp c_minus_k(cached.f, cached.e); // = c ~= 10^-k - - // The exponent of the products is = v.e + c_minus_k.e + q and is in the range [alpha,gamma] - const diyfp w = diyfp::mul(v, c_minus_k); - const diyfp w_minus = diyfp::mul(m_minus, c_minus_k); - const diyfp w_plus = diyfp::mul(m_plus, c_minus_k); - - // ----(---+---)---------------(---+---)---------------(---+---)---- - // w- w w+ - // = c*m- = c*v = c*m+ - // - // diyfp::mul rounds its result and c_minus_k is approximated too. w, w- and - // w+ are now off by a small amount. - // In fact: - // - // w - v * 10^k < 1 ulp - // - // To account for this inaccuracy, add resp. subtract 1 ulp. - // - // --------+---[---------------(---+---)---------------]---+-------- - // w- M- w M+ w+ - // - // Now any number in [M-, M+] (bounds included) will round to w when input, - // regardless of how the input rounding algorithm breaks ties. - // - // And digit_gen generates the shortest possible such number in [M-, M+]. - // Note that this does not mean that Grisu2 always generates the shortest - // possible number in the interval (m-, m+). - const diyfp M_minus(w_minus.f + 1, w_minus.e); - const diyfp M_plus (w_plus.f - 1, w_plus.e ); - - decimal_exponent = -cached.k; // = -(-k) = k - - grisu2_digit_gen(buf, len, decimal_exponent, M_minus, w, M_plus); -} - -/*! -v = buf * 10^decimal_exponent -len is the length of the buffer (number of decimal digits) -The buffer must be large enough, i.e. >= max_digits10. -*/ -template -void grisu2(char* buf, int& len, int& decimal_exponent, FloatType value) -{ - static_assert(diyfp::kPrecision >= std::numeric_limits::digits + 3, - "internal error: not enough precision"); - - assert(std::isfinite(value)); - assert(value > 0); - - // If the neighbors (and boundaries) of 'value' are always computed for double-precision - // numbers, all float's can be recovered using strtod (and strtof). However, the resulting - // decimal representations are not exactly "short". - // - // The documentation for 'std::to_chars' (https://en.cppreference.com/w/cpp/utility/to_chars) - // says "value is converted to a string as if by std::sprintf in the default ("C") locale" - // and since sprintf promotes float's to double's, I think this is exactly what 'std::to_chars' - // does. - // On the other hand, the documentation for 'std::to_chars' requires that "parsing the - // representation using the corresponding std::from_chars function recovers value exactly". That - // indicates that single precision floating-point numbers should be recovered using - // 'std::strtof'. - // - // NB: If the neighbors are computed for single-precision numbers, there is a single float - // (7.0385307e-26f) which can't be recovered using strtod. The resulting double precision - // value is off by 1 ulp. -#if 0 - const boundaries w = compute_boundaries(static_cast(value)); -#else - const boundaries w = compute_boundaries(value); -#endif - - grisu2(buf, len, decimal_exponent, w.minus, w.w, w.plus); -} - -/*! -@brief appends a decimal representation of e to buf -@return a pointer to the element following the exponent. -@pre -1000 < e < 1000 -*/ -inline char* append_exponent(char* buf, int e) -{ - assert(e > -1000); - assert(e < 1000); - - if (e < 0) - { - e = -e; - *buf++ = '-'; - } - else - { - *buf++ = '+'; - } - - auto k = static_cast(e); - if (k < 10) - { - // Always print at least two digits in the exponent. - // This is for compatibility with printf("%g"). 
- *buf++ = '0'; - *buf++ = static_cast('0' + k); - } - else if (k < 100) - { - *buf++ = static_cast('0' + k / 10); - k %= 10; - *buf++ = static_cast('0' + k); - } - else - { - *buf++ = static_cast('0' + k / 100); - k %= 100; - *buf++ = static_cast('0' + k / 10); - k %= 10; - *buf++ = static_cast('0' + k); - } - - return buf; -} - -/*! -@brief prettify v = buf * 10^decimal_exponent - -If v is in the range [10^min_exp, 10^max_exp) it will be printed in fixed-point -notation. Otherwise it will be printed in exponential notation. - -@pre min_exp < 0 -@pre max_exp > 0 -*/ -inline char* format_buffer(char* buf, int len, int decimal_exponent, - int min_exp, int max_exp) -{ - assert(min_exp < 0); - assert(max_exp > 0); - - const int k = len; - const int n = len + decimal_exponent; - - // v = buf * 10^(n-k) - // k is the length of the buffer (number of decimal digits) - // n is the position of the decimal point relative to the start of the buffer. - - if (k <= n and n <= max_exp) - { - // digits[000] - // len <= max_exp + 2 - - std::memset(buf + k, '0', static_cast(n - k)); - // Make it look like a floating-point number (#362, #378) - buf[n + 0] = '.'; - buf[n + 1] = '0'; - return buf + (n + 2); - } - - if (0 < n and n <= max_exp) - { - // dig.its - // len <= max_digits10 + 1 - - assert(k > n); - - std::memmove(buf + (n + 1), buf + n, static_cast(k - n)); - buf[n] = '.'; - return buf + (k + 1); - } - - if (min_exp < n and n <= 0) - { - // 0.[000]digits - // len <= 2 + (-min_exp - 1) + max_digits10 - - std::memmove(buf + (2 + -n), buf, static_cast(k)); - buf[0] = '0'; - buf[1] = '.'; - std::memset(buf + 2, '0', static_cast(-n)); - return buf + (2 + (-n) + k); - } - - if (k == 1) - { - // dE+123 - // len <= 1 + 5 - - buf += 1; - } - else - { - // d.igitsE+123 - // len <= max_digits10 + 1 + 5 - - std::memmove(buf + 2, buf + 1, static_cast(k - 1)); - buf[1] = '.'; - buf += 1 + k; - } - - *buf++ = 'e'; - return append_exponent(buf, n - 1); -} - -} // namespace dtoa_impl - -/*! -@brief generates a decimal representation of the floating-point number value in [first, last). - -The format of the resulting decimal representation is similar to printf's %g -format. Returns an iterator pointing past-the-end of the decimal representation. - -@note The input number must be finite, i.e. NaN's and Inf's are not supported. -@note The buffer must be large enough. -@note The result is NOT null-terminated. -*/ -template -char* to_chars(char* first, const char* last, FloatType value) -{ - static_cast(last); // maybe unused - fix warning - assert(std::isfinite(value)); - - // Use signbit(value) instead of (value < 0) since signbit works for -0. - if (std::signbit(value)) - { - value = -value; - *first++ = '-'; - } - - if (value == 0) // +-0 - { - *first++ = '0'; - // Make it look like a floating-point number (#362, #378) - *first++ = '.'; - *first++ = '0'; - return first; - } - - assert(last - first >= std::numeric_limits::max_digits10); - - // Compute v = buffer * 10^decimal_exponent. - // The decimal digits are stored in the buffer, which needs to be interpreted - // as an unsigned decimal integer. - // len is the length of the buffer, i.e. the number of decimal digits. - int len = 0; - int decimal_exponent = 0; - dtoa_impl::grisu2(first, len, decimal_exponent, value); - - assert(len <= std::numeric_limits::max_digits10); - - // Format the buffer like printf("%.*g", prec, value) - constexpr int kMinExp = -4; - // Use digits10 here to increase compatibility with version 2. 
- constexpr int kMaxExp = std::numeric_limits::digits10; - - assert(last - first >= kMaxExp + 2); - assert(last - first >= 2 + (-kMinExp - 1) + std::numeric_limits::max_digits10); - assert(last - first >= std::numeric_limits::max_digits10 + 6); - - return dtoa_impl::format_buffer(first, len, decimal_exponent, kMinExp, kMaxExp); -} - -} // namespace detail -} // namespace nlohmann - -// #include - -// #include - -// #include - -// #include - -// #include - - -namespace nlohmann -{ -namespace detail -{ -/////////////////// -// serialization // -/////////////////// - -/// how to treat decoding errors -enum class error_handler_t -{ - strict, ///< throw a type_error exception in case of invalid UTF-8 - replace, ///< replace invalid UTF-8 sequences with U+FFFD - ignore ///< ignore invalid UTF-8 sequences -}; - -template -class serializer -{ - using string_t = typename BasicJsonType::string_t; - using number_float_t = typename BasicJsonType::number_float_t; - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - static constexpr uint8_t UTF8_ACCEPT = 0; - static constexpr uint8_t UTF8_REJECT = 1; - - public: - /*! - @param[in] s output stream to serialize to - @param[in] ichar indentation character to use - @param[in] error_handler_ how to react on decoding errors - */ - serializer(output_adapter_t s, const char ichar, - error_handler_t error_handler_ = error_handler_t::strict) - : o(std::move(s)) - , loc(std::localeconv()) - , thousands_sep(loc->thousands_sep == nullptr ? '\0' : * (loc->thousands_sep)) - , decimal_point(loc->decimal_point == nullptr ? '\0' : * (loc->decimal_point)) - , indent_char(ichar) - , indent_string(512, indent_char) - , error_handler(error_handler_) - {} - - // delete because of pointer members - serializer(const serializer&) = delete; - serializer& operator=(const serializer&) = delete; - serializer(serializer&&) = delete; - serializer& operator=(serializer&&) = delete; - ~serializer() = default; - - /*! - @brief internal implementation of the serialization function - - This function is called by the public member function dump and organizes - the serialization internally. The indentation level is propagated as - additional parameter. In case of arrays and objects, the function is - called recursively. 
- - - strings and object keys are escaped using `escape_string()` - - integer numbers are converted implicitly via `operator<<` - - floating-point numbers are converted to a string using `"%g"` format - - @param[in] val value to serialize - @param[in] pretty_print whether the output shall be pretty-printed - @param[in] indent_step the indent level - @param[in] current_indent the current indent level (only used internally) - */ - void dump(const BasicJsonType& val, const bool pretty_print, - const bool ensure_ascii, - const unsigned int indent_step, - const unsigned int current_indent = 0) - { - switch (val.m_type) - { - case value_t::object: - { - if (val.m_value.object->empty()) - { - o->write_characters("{}", 2); - return; - } - - if (pretty_print) - { - o->write_characters("{\n", 2); - - // variable to hold indentation for recursive calls - const auto new_indent = current_indent + indent_step; - if (JSON_UNLIKELY(indent_string.size() < new_indent)) - { - indent_string.resize(indent_string.size() * 2, ' '); - } - - // first n-1 elements - auto i = val.m_value.object->cbegin(); - for (std::size_t cnt = 0; cnt < val.m_value.object->size() - 1; ++cnt, ++i) - { - o->write_characters(indent_string.c_str(), new_indent); - o->write_character('\"'); - dump_escaped(i->first, ensure_ascii); - o->write_characters("\": ", 3); - dump(i->second, true, ensure_ascii, indent_step, new_indent); - o->write_characters(",\n", 2); - } - - // last element - assert(i != val.m_value.object->cend()); - assert(std::next(i) == val.m_value.object->cend()); - o->write_characters(indent_string.c_str(), new_indent); - o->write_character('\"'); - dump_escaped(i->first, ensure_ascii); - o->write_characters("\": ", 3); - dump(i->second, true, ensure_ascii, indent_step, new_indent); - - o->write_character('\n'); - o->write_characters(indent_string.c_str(), current_indent); - o->write_character('}'); - } - else - { - o->write_character('{'); - - // first n-1 elements - auto i = val.m_value.object->cbegin(); - for (std::size_t cnt = 0; cnt < val.m_value.object->size() - 1; ++cnt, ++i) - { - o->write_character('\"'); - dump_escaped(i->first, ensure_ascii); - o->write_characters("\":", 2); - dump(i->second, false, ensure_ascii, indent_step, current_indent); - o->write_character(','); - } - - // last element - assert(i != val.m_value.object->cend()); - assert(std::next(i) == val.m_value.object->cend()); - o->write_character('\"'); - dump_escaped(i->first, ensure_ascii); - o->write_characters("\":", 2); - dump(i->second, false, ensure_ascii, indent_step, current_indent); - - o->write_character('}'); - } - - return; - } - - case value_t::array: - { - if (val.m_value.array->empty()) - { - o->write_characters("[]", 2); - return; - } - - if (pretty_print) - { - o->write_characters("[\n", 2); - - // variable to hold indentation for recursive calls - const auto new_indent = current_indent + indent_step; - if (JSON_UNLIKELY(indent_string.size() < new_indent)) - { - indent_string.resize(indent_string.size() * 2, ' '); - } - - // first n-1 elements - for (auto i = val.m_value.array->cbegin(); - i != val.m_value.array->cend() - 1; ++i) - { - o->write_characters(indent_string.c_str(), new_indent); - dump(*i, true, ensure_ascii, indent_step, new_indent); - o->write_characters(",\n", 2); - } - - // last element - assert(not val.m_value.array->empty()); - o->write_characters(indent_string.c_str(), new_indent); - dump(val.m_value.array->back(), true, ensure_ascii, indent_step, new_indent); - - o->write_character('\n'); - 
o->write_characters(indent_string.c_str(), current_indent); - o->write_character(']'); - } - else - { - o->write_character('['); - - // first n-1 elements - for (auto i = val.m_value.array->cbegin(); - i != val.m_value.array->cend() - 1; ++i) - { - dump(*i, false, ensure_ascii, indent_step, current_indent); - o->write_character(','); - } - - // last element - assert(not val.m_value.array->empty()); - dump(val.m_value.array->back(), false, ensure_ascii, indent_step, current_indent); - - o->write_character(']'); - } - - return; - } - - case value_t::string: - { - o->write_character('\"'); - dump_escaped(*val.m_value.string, ensure_ascii); - o->write_character('\"'); - return; - } - - case value_t::boolean: - { - if (val.m_value.boolean) - { - o->write_characters("true", 4); - } - else - { - o->write_characters("false", 5); - } - return; - } - - case value_t::number_integer: - { - dump_integer(val.m_value.number_integer); - return; - } - - case value_t::number_unsigned: - { - dump_integer(val.m_value.number_unsigned); - return; - } - - case value_t::number_float: - { - dump_float(val.m_value.number_float); - return; - } - - case value_t::discarded: - { - o->write_characters("", 11); - return; - } - - case value_t::null: - { - o->write_characters("null", 4); - return; - } - } - } - - private: - /*! - @brief dump escaped string - - Escape a string by replacing certain special characters by a sequence of an - escape character (backslash) and another character and other control - characters by a sequence of "\u" followed by a four-digit hex - representation. The escaped string is written to output stream @a o. - - @param[in] s the string to escape - @param[in] ensure_ascii whether to escape non-ASCII characters with - \uXXXX sequences - - @complexity Linear in the length of string @a s. 
- */ - void dump_escaped(const string_t& s, const bool ensure_ascii) - { - uint32_t codepoint; - uint8_t state = UTF8_ACCEPT; - std::size_t bytes = 0; // number of bytes written to string_buffer - - // number of bytes written at the point of the last valid byte - std::size_t bytes_after_last_accept = 0; - std::size_t undumped_chars = 0; - - for (std::size_t i = 0; i < s.size(); ++i) - { - const auto byte = static_cast(s[i]); - - switch (decode(state, codepoint, byte)) - { - case UTF8_ACCEPT: // decode found a new code point - { - switch (codepoint) - { - case 0x08: // backspace - { - string_buffer[bytes++] = '\\'; - string_buffer[bytes++] = 'b'; - break; - } - - case 0x09: // horizontal tab - { - string_buffer[bytes++] = '\\'; - string_buffer[bytes++] = 't'; - break; - } - - case 0x0A: // newline - { - string_buffer[bytes++] = '\\'; - string_buffer[bytes++] = 'n'; - break; - } - - case 0x0C: // formfeed - { - string_buffer[bytes++] = '\\'; - string_buffer[bytes++] = 'f'; - break; - } - - case 0x0D: // carriage return - { - string_buffer[bytes++] = '\\'; - string_buffer[bytes++] = 'r'; - break; - } - - case 0x22: // quotation mark - { - string_buffer[bytes++] = '\\'; - string_buffer[bytes++] = '\"'; - break; - } - - case 0x5C: // reverse solidus - { - string_buffer[bytes++] = '\\'; - string_buffer[bytes++] = '\\'; - break; - } - - default: - { - // escape control characters (0x00..0x1F) or, if - // ensure_ascii parameter is used, non-ASCII characters - if ((codepoint <= 0x1F) or (ensure_ascii and (codepoint >= 0x7F))) - { - if (codepoint <= 0xFFFF) - { - (std::snprintf)(string_buffer.data() + bytes, 7, "\\u%04x", - static_cast(codepoint)); - bytes += 6; - } - else - { - (std::snprintf)(string_buffer.data() + bytes, 13, "\\u%04x\\u%04x", - static_cast(0xD7C0 + (codepoint >> 10)), - static_cast(0xDC00 + (codepoint & 0x3FF))); - bytes += 12; - } - } - else - { - // copy byte to buffer (all previous bytes - // been copied have in default case above) - string_buffer[bytes++] = s[i]; - } - break; - } - } - - // write buffer and reset index; there must be 13 bytes - // left, as this is the maximal number of bytes to be - // written ("\uxxxx\uxxxx\0") for one code point - if (string_buffer.size() - bytes < 13) - { - o->write_characters(string_buffer.data(), bytes); - bytes = 0; - } - - // remember the byte position of this accept - bytes_after_last_accept = bytes; - undumped_chars = 0; - break; - } - - case UTF8_REJECT: // decode found invalid UTF-8 byte - { - switch (error_handler) - { - case error_handler_t::strict: - { - std::string sn(3, '\0'); - (std::snprintf)(&sn[0], sn.size(), "%.2X", byte); - JSON_THROW(type_error::create(316, "invalid UTF-8 byte at index " + std::to_string(i) + ": 0x" + sn)); - } - - case error_handler_t::ignore: - case error_handler_t::replace: - { - // in case we saw this character the first time, we - // would like to read it again, because the byte - // may be OK for itself, but just not OK for the - // previous sequence - if (undumped_chars > 0) - { - --i; - } - - // reset length buffer to the last accepted index; - // thus removing/ignoring the invalid characters - bytes = bytes_after_last_accept; - - if (error_handler == error_handler_t::replace) - { - // add a replacement character - if (ensure_ascii) - { - string_buffer[bytes++] = '\\'; - string_buffer[bytes++] = 'u'; - string_buffer[bytes++] = 'f'; - string_buffer[bytes++] = 'f'; - string_buffer[bytes++] = 'f'; - string_buffer[bytes++] = 'd'; - } - else - { - string_buffer[bytes++] = 
detail::binary_writer::to_char_type('\xEF'); - string_buffer[bytes++] = detail::binary_writer::to_char_type('\xBF'); - string_buffer[bytes++] = detail::binary_writer::to_char_type('\xBD'); - } - bytes_after_last_accept = bytes; - } - - undumped_chars = 0; - - // continue processing the string - state = UTF8_ACCEPT; - break; - } - } - break; - } - - default: // decode found yet incomplete multi-byte code point - { - if (not ensure_ascii) - { - // code point will not be escaped - copy byte to buffer - string_buffer[bytes++] = s[i]; - } - ++undumped_chars; - break; - } - } - } - - // we finished processing the string - if (JSON_LIKELY(state == UTF8_ACCEPT)) - { - // write buffer - if (bytes > 0) - { - o->write_characters(string_buffer.data(), bytes); - } - } - else - { - // we finish reading, but do not accept: string was incomplete - switch (error_handler) - { - case error_handler_t::strict: - { - std::string sn(3, '\0'); - (std::snprintf)(&sn[0], sn.size(), "%.2X", static_cast(s.back())); - JSON_THROW(type_error::create(316, "incomplete UTF-8 string; last byte: 0x" + sn)); - } - - case error_handler_t::ignore: - { - // write all accepted bytes - o->write_characters(string_buffer.data(), bytes_after_last_accept); - break; - } - - case error_handler_t::replace: - { - // write all accepted bytes - o->write_characters(string_buffer.data(), bytes_after_last_accept); - // add a replacement character - if (ensure_ascii) - { - o->write_characters("\\ufffd", 6); - } - else - { - o->write_characters("\xEF\xBF\xBD", 3); - } - break; - } - } - } - } - - /*! - @brief dump an integer - - Dump a given integer to output stream @a o. Works internally with - @a number_buffer. - - @param[in] x integer number (signed or unsigned) to dump - @tparam NumberType either @a number_integer_t or @a number_unsigned_t - */ - template::value or - std::is_same::value, - int> = 0> - void dump_integer(NumberType x) - { - // special case for "0" - if (x == 0) - { - o->write_character('0'); - return; - } - - const bool is_negative = std::is_same::value and not (x >= 0); // see issue #755 - std::size_t i = 0; - - while (x != 0) - { - // spare 1 byte for '\0' - assert(i < number_buffer.size() - 1); - - const auto digit = std::labs(static_cast(x % 10)); - number_buffer[i++] = static_cast('0' + digit); - x /= 10; - } - - if (is_negative) - { - // make sure there is capacity for the '-' - assert(i < number_buffer.size() - 2); - number_buffer[i++] = '-'; - } - - std::reverse(number_buffer.begin(), number_buffer.begin() + i); - o->write_characters(number_buffer.data(), i); - } - - /*! - @brief dump a floating-point number - - Dump a given floating-point number to output stream @a o. Works internally - with @a number_buffer. - - @param[in] x floating-point number to dump - */ - void dump_float(number_float_t x) - { - // NaN / inf - if (not std::isfinite(x)) - { - o->write_characters("null", 4); - return; - } - - // If number_float_t is an IEEE-754 single or double precision number, - // use the Grisu2 algorithm to produce short numbers which are - // guaranteed to round-trip, using strtof and strtod, resp. - // - // NB: The test below works if == . 
- static constexpr bool is_ieee_single_or_double - = (std::numeric_limits::is_iec559 and std::numeric_limits::digits == 24 and std::numeric_limits::max_exponent == 128) or - (std::numeric_limits::is_iec559 and std::numeric_limits::digits == 53 and std::numeric_limits::max_exponent == 1024); - - dump_float(x, std::integral_constant()); - } - - void dump_float(number_float_t x, std::true_type /*is_ieee_single_or_double*/) - { - char* begin = number_buffer.data(); - char* end = ::nlohmann::detail::to_chars(begin, begin + number_buffer.size(), x); - - o->write_characters(begin, static_cast(end - begin)); - } - - void dump_float(number_float_t x, std::false_type /*is_ieee_single_or_double*/) - { - // get number of digits for a float -> text -> float round-trip - static constexpr auto d = std::numeric_limits::max_digits10; - - // the actual conversion - std::ptrdiff_t len = (std::snprintf)(number_buffer.data(), number_buffer.size(), "%.*g", d, x); - - // negative value indicates an error - assert(len > 0); - // check if buffer was large enough - assert(static_cast(len) < number_buffer.size()); - - // erase thousands separator - if (thousands_sep != '\0') - { - const auto end = std::remove(number_buffer.begin(), - number_buffer.begin() + len, thousands_sep); - std::fill(end, number_buffer.end(), '\0'); - assert((end - number_buffer.begin()) <= len); - len = (end - number_buffer.begin()); - } - - // convert decimal point to '.' - if (decimal_point != '\0' and decimal_point != '.') - { - const auto dec_pos = std::find(number_buffer.begin(), number_buffer.end(), decimal_point); - if (dec_pos != number_buffer.end()) - { - *dec_pos = '.'; - } - } - - o->write_characters(number_buffer.data(), static_cast(len)); - - // determine if need to append ".0" - const bool value_is_int_like = - std::none_of(number_buffer.begin(), number_buffer.begin() + len + 1, - [](char c) - { - return (c == '.' or c == 'e'); - }); - - if (value_is_int_like) - { - o->write_characters(".0", 2); - } - } - - /*! - @brief check whether a string is UTF-8 encoded - - The function checks each byte of a string whether it is UTF-8 encoded. The - result of the check is stored in the @a state parameter. The function must - be called initially with state 0 (accept). State 1 means the string must - be rejected, because the current byte is not allowed. If the string is - completely processed, but the state is non-zero, the string ended - prematurely; that is, the last byte indicated more bytes should have - followed. - - @param[in,out] state the state of the decoding - @param[in,out] codep codepoint (valid only if resulting state is UTF8_ACCEPT) - @param[in] byte next byte to decode - @return new state - - @note The function has been edited: a std::array is used. 
- - @copyright Copyright (c) 2008-2009 Bjoern Hoehrmann - @sa http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ - */ - static uint8_t decode(uint8_t& state, uint32_t& codep, const uint8_t byte) noexcept - { - static const std::array utf8d = - { - { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 00..1F - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20..3F - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 40..5F - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 60..7F - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, // 80..9F - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // A0..BF - 8, 8, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // C0..DF - 0xA, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x3, 0x3, // E0..EF - 0xB, 0x6, 0x6, 0x6, 0x5, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, // F0..FF - 0x0, 0x1, 0x2, 0x3, 0x5, 0x8, 0x7, 0x1, 0x1, 0x1, 0x4, 0x6, 0x1, 0x1, 0x1, 0x1, // s0..s0 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, // s1..s2 - 1, 2, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, // s3..s4 - 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, // s5..s6 - 1, 3, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 // s7..s8 - } - }; - - const uint8_t type = utf8d[byte]; - - codep = (state != UTF8_ACCEPT) - ? (byte & 0x3fu) | (codep << 6) - : static_cast(0xff >> type) & (byte); - - state = utf8d[256u + state * 16u + type]; - return state; - } - - private: - /// the output of the serializer - output_adapter_t o = nullptr; - - /// a (hopefully) large enough character buffer - std::array number_buffer{{}}; - - /// the locale - const std::lconv* loc = nullptr; - /// the locale's thousand separator character - const char thousands_sep = '\0'; - /// the locale's decimal point character - const char decimal_point = '\0'; - - /// string buffer - std::array string_buffer{{}}; - - /// the indentation character - const char indent_char; - /// the indentation string - string_t indent_string; - - /// error_handler how to react on decoding errors - const error_handler_t error_handler; -}; -} // namespace detail -} // namespace nlohmann - -// #include - - -#include -#include - -// #include - - -namespace nlohmann -{ -namespace detail -{ -template -class json_ref -{ - public: - using value_type = BasicJsonType; - - json_ref(value_type&& value) - : owned_value(std::move(value)), value_ref(&owned_value), is_rvalue(true) - {} - - json_ref(const value_type& value) - : value_ref(const_cast(&value)), is_rvalue(false) - {} - - json_ref(std::initializer_list init) - : owned_value(init), value_ref(&owned_value), is_rvalue(true) - {} - - template < - class... Args, - enable_if_t::value, int> = 0 > - json_ref(Args && ... 
args) - : owned_value(std::forward(args)...), value_ref(&owned_value), - is_rvalue(true) {} - - // class should be movable only - json_ref(json_ref&&) = default; - json_ref(const json_ref&) = delete; - json_ref& operator=(const json_ref&) = delete; - json_ref& operator=(json_ref&&) = delete; - ~json_ref() = default; - - value_type moved_or_copied() const - { - if (is_rvalue) - { - return std::move(*value_ref); - } - return *value_ref; - } - - value_type const& operator*() const - { - return *static_cast(value_ref); - } - - value_type const* operator->() const - { - return static_cast(value_ref); - } - - private: - mutable value_type owned_value = nullptr; - value_type* value_ref = nullptr; - const bool is_rvalue; -}; -} // namespace detail -} // namespace nlohmann +// #include // #include +#include // all_of #include // assert #include // accumulate #include // string +#include // move #include // vector -// #include - // #include +// #include + // #include @@ -11755,8 +8466,7 @@ class json_pointer @return a string representation of the JSON pointer - @liveexample{The example shows the result of `to_string`., - json_pointer__to_string} + @liveexample{The example shows the result of `to_string`.,json_pointer__to_string} @since version 2.0.0 */ @@ -11776,6 +8486,249 @@ class json_pointer return to_string(); } + /*! + @brief append another JSON pointer at the end of this JSON pointer + + @param[in] ptr JSON pointer to append + @return JSON pointer with @a ptr appended + + @liveexample{The example shows the usage of `operator/=`.,json_pointer__operator_add} + + @complexity Linear in the length of @a ptr. + + @sa @ref operator/=(std::string) to append a reference token + @sa @ref operator/=(std::size_t) to append an array index + @sa @ref operator/(const json_pointer&, const json_pointer&) for a binary operator + + @since version 3.6.0 + */ + json_pointer& operator/=(const json_pointer& ptr) + { + reference_tokens.insert(reference_tokens.end(), + ptr.reference_tokens.begin(), + ptr.reference_tokens.end()); + return *this; + } + + /*! + @brief append an unescaped reference token at the end of this JSON pointer + + @param[in] token reference token to append + @return JSON pointer with @a token appended without escaping @a token + + @liveexample{The example shows the usage of `operator/=`.,json_pointer__operator_add} + + @complexity Amortized constant. + + @sa @ref operator/=(const json_pointer&) to append a JSON pointer + @sa @ref operator/=(std::size_t) to append an array index + @sa @ref operator/(const json_pointer&, std::size_t) for a binary operator + + @since version 3.6.0 + */ + json_pointer& operator/=(std::string token) + { + push_back(std::move(token)); + return *this; + } + + /*! + @brief append an array index at the end of this JSON pointer + + @param[in] array_index array index ot append + @return JSON pointer with @a array_index appended + + @liveexample{The example shows the usage of `operator/=`.,json_pointer__operator_add} + + @complexity Amortized constant. + + @sa @ref operator/=(const json_pointer&) to append a JSON pointer + @sa @ref operator/=(std::string) to append a reference token + @sa @ref operator/(const json_pointer&, std::string) for a binary operator + + @since version 3.6.0 + */ + json_pointer& operator/=(std::size_t array_index) + { + return *this /= std::to_string(array_index); + } + + /*! 
+ @brief create a new JSON pointer by appending the right JSON pointer at the end of the left JSON pointer + + @param[in] lhs JSON pointer + @param[in] rhs JSON pointer + @return a new JSON pointer with @a rhs appended to @a lhs + + @liveexample{The example shows the usage of `operator/`.,json_pointer__operator_add_binary} + + @complexity Linear in the length of @a lhs and @a rhs. + + @sa @ref operator/=(const json_pointer&) to append a JSON pointer + + @since version 3.6.0 + */ + friend json_pointer operator/(const json_pointer& lhs, + const json_pointer& rhs) + { + return json_pointer(lhs) /= rhs; + } + + /*! + @brief create a new JSON pointer by appending the unescaped token at the end of the JSON pointer + + @param[in] ptr JSON pointer + @param[in] token reference token + @return a new JSON pointer with unescaped @a token appended to @a ptr + + @liveexample{The example shows the usage of `operator/`.,json_pointer__operator_add_binary} + + @complexity Linear in the length of @a ptr. + + @sa @ref operator/=(std::string) to append a reference token + + @since version 3.6.0 + */ + friend json_pointer operator/(const json_pointer& ptr, std::string token) + { + return json_pointer(ptr) /= std::move(token); + } + + /*! + @brief create a new JSON pointer by appending the array-index-token at the end of the JSON pointer + + @param[in] ptr JSON pointer + @param[in] array_index array index + @return a new JSON pointer with @a array_index appended to @a ptr + + @liveexample{The example shows the usage of `operator/`.,json_pointer__operator_add_binary} + + @complexity Linear in the length of @a ptr. + + @sa @ref operator/=(std::size_t) to append an array index + + @since version 3.6.0 + */ + friend json_pointer operator/(const json_pointer& ptr, std::size_t array_index) + { + return json_pointer(ptr) /= array_index; + } + + /*! + @brief returns the parent of this JSON pointer + + @return parent of this JSON pointer; in case this JSON pointer is the root, + the root itself is returned + + @complexity Linear in the length of the JSON pointer. + + @liveexample{The example shows the result of `parent_pointer` for different + JSON Pointers.,json_pointer__parent_pointer} + + @since version 3.6.0 + */ + json_pointer parent_pointer() const + { + if (empty()) + { + return *this; + } + + json_pointer res = *this; + res.pop_back(); + return res; + } + + /*! + @brief remove last reference token + + @pre not `empty()` + + @liveexample{The example shows the usage of `pop_back`.,json_pointer__pop_back} + + @complexity Constant. + + @throw out_of_range.405 if JSON pointer has no parent + + @since version 3.6.0 + */ + void pop_back() + { + if (JSON_UNLIKELY(empty())) + { + JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent")); + } + + reference_tokens.pop_back(); + } + + /*! + @brief return last reference token + + @pre not `empty()` + @return last reference token + + @liveexample{The example shows the usage of `back`.,json_pointer__back} + + @complexity Constant. + + @throw out_of_range.405 if JSON pointer has no parent + + @since version 3.6.0 + */ + const std::string& back() + { + if (JSON_UNLIKELY(empty())) + { + JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent")); + } + + return reference_tokens.back(); + } + + /*! + @brief append an unescaped token at the end of the reference pointer + + @param[in] token token to add + + @complexity Amortized constant. 
+ + @liveexample{The example shows the result of `push_back` for different + JSON Pointers.,json_pointer__push_back} + + @since version 3.6.0 + */ + void push_back(const std::string& token) + { + reference_tokens.push_back(token); + } + + /// @copydoc push_back(const std::string&) + void push_back(std::string&& token) + { + reference_tokens.push_back(std::move(token)); + } + + /*! + @brief return whether pointer points to the root document + + @return true iff the JSON pointer points to the root document + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @liveexample{The example shows the result of `empty` for different JSON + Pointers.,json_pointer__empty} + + @since version 3.6.0 + */ + bool empty() const noexcept + { + return reference_tokens.empty(); + } + + private: /*! @param[in] s reference token to be converted into an array index @@ -11797,32 +8750,9 @@ class json_pointer return res; } - private: - /*! - @brief remove and return last reference pointer - @throw out_of_range.405 if JSON pointer has no parent - */ - std::string pop_back() - { - if (JSON_UNLIKELY(is_root())) - { - JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent")); - } - - auto last = reference_tokens.back(); - reference_tokens.pop_back(); - return last; - } - - /// return whether pointer points to the root document - bool is_root() const noexcept - { - return reference_tokens.empty(); - } - json_pointer top() const { - if (JSON_UNLIKELY(is_root())) + if (JSON_UNLIKELY(empty())) { JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent")); } @@ -11933,7 +8863,7 @@ class json_pointer std::all_of(reference_token.begin(), reference_token.end(), [](const char x) { - return (x >= '0' and x <= '9'); + return x >= '0' and x <= '9'; }); // change value to array for numbers or "-" or to object otherwise @@ -12379,12 +9309,34 @@ class json_pointer return result; } + /*! + @brief compares two JSON pointers for equality + + @param[in] lhs JSON pointer to compare + @param[in] rhs JSON pointer to compare + @return whether @a lhs is equal to @a rhs + + @complexity Linear in the length of the JSON pointer + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + */ friend bool operator==(json_pointer const& lhs, json_pointer const& rhs) noexcept { - return (lhs.reference_tokens == rhs.reference_tokens); + return lhs.reference_tokens == rhs.reference_tokens; } + /*! + @brief compares two JSON pointers for inequality + + @param[in] lhs JSON pointer to compare + @param[in] rhs JSON pointer to compare + @return whether @a lhs is not equal @a rhs + + @complexity Linear in the length of the JSON pointer + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + */ friend bool operator!=(json_pointer const& lhs, json_pointer const& rhs) noexcept { @@ -12396,59 +9348,3494 @@ class json_pointer }; } // namespace nlohmann -// #include +// #include +#include #include -// #include - -// #include +// #include namespace nlohmann { - -template -struct adl_serializer +namespace detail { - /*! - @brief convert a JSON value to any value type +template +class json_ref +{ + public: + using value_type = BasicJsonType; - This function is usually called by the `get()` function of the - @ref basic_json class (either explicit or via conversion operators). 
+ json_ref(value_type&& value) + : owned_value(std::move(value)), value_ref(&owned_value), is_rvalue(true) + {} - @param[in] j JSON value to read from - @param[in,out] val value to write to - */ - template - static auto from_json(BasicJsonType&& j, ValueType& val) noexcept( - noexcept(::nlohmann::from_json(std::forward(j), val))) - -> decltype(::nlohmann::from_json(std::forward(j), val), void()) + json_ref(const value_type& value) + : value_ref(const_cast(&value)), is_rvalue(false) + {} + + json_ref(std::initializer_list init) + : owned_value(init), value_ref(&owned_value), is_rvalue(true) + {} + + template < + class... Args, + enable_if_t::value, int> = 0 > + json_ref(Args && ... args) + : owned_value(std::forward(args)...), value_ref(&owned_value), + is_rvalue(true) {} + + // class should be movable only + json_ref(json_ref&&) = default; + json_ref(const json_ref&) = delete; + json_ref& operator=(const json_ref&) = delete; + json_ref& operator=(json_ref&&) = delete; + ~json_ref() = default; + + value_type moved_or_copied() const { - ::nlohmann::from_json(std::forward(j), val); + if (is_rvalue) + { + return std::move(*value_ref); + } + return *value_ref; + } + + value_type const& operator*() const + { + return *static_cast(value_ref); + } + + value_type const* operator->() const + { + return static_cast(value_ref); + } + + private: + mutable value_type owned_value = nullptr; + value_type* value_ref = nullptr; + const bool is_rvalue; +}; +} // namespace detail +} // namespace nlohmann + +// #include + +// #include + +// #include + +// #include + + +#include // reverse +#include // array +#include // uint8_t, uint16_t, uint32_t, uint64_t +#include // memcpy +#include // numeric_limits +#include // string + +// #include + +// #include + + +#include // copy +#include // size_t +#include // streamsize +#include // back_inserter +#include // shared_ptr, make_shared +#include // basic_ostream +#include // basic_string +#include // vector + +namespace nlohmann +{ +namespace detail +{ +/// abstract output adapter interface +template struct output_adapter_protocol +{ + virtual void write_character(CharType c) = 0; + virtual void write_characters(const CharType* s, std::size_t length) = 0; + virtual ~output_adapter_protocol() = default; +}; + +/// a type to simplify interfaces +template +using output_adapter_t = std::shared_ptr>; + +/// output adapter for byte vectors +template +class output_vector_adapter : public output_adapter_protocol +{ + public: + explicit output_vector_adapter(std::vector& vec) noexcept + : v(vec) + {} + + void write_character(CharType c) override + { + v.push_back(c); + } + + void write_characters(const CharType* s, std::size_t length) override + { + std::copy(s, s + length, std::back_inserter(v)); + } + + private: + std::vector& v; +}; + +/// output adapter for output streams +template +class output_stream_adapter : public output_adapter_protocol +{ + public: + explicit output_stream_adapter(std::basic_ostream& s) noexcept + : stream(s) + {} + + void write_character(CharType c) override + { + stream.put(c); + } + + void write_characters(const CharType* s, std::size_t length) override + { + stream.write(s, static_cast(length)); + } + + private: + std::basic_ostream& stream; +}; + +/// output adapter for basic_string +template> +class output_string_adapter : public output_adapter_protocol +{ + public: + explicit output_string_adapter(StringType& s) noexcept + : str(s) + {} + + void write_character(CharType c) override + { + str.push_back(c); + } + + void 
write_characters(const CharType* s, std::size_t length) override + { + str.append(s, length); + } + + private: + StringType& str; +}; + +template> +class output_adapter +{ + public: + output_adapter(std::vector& vec) + : oa(std::make_shared>(vec)) {} + + output_adapter(std::basic_ostream& s) + : oa(std::make_shared>(s)) {} + + output_adapter(StringType& s) + : oa(std::make_shared>(s)) {} + + operator output_adapter_t() + { + return oa; + } + + private: + output_adapter_t oa = nullptr; +}; +} // namespace detail +} // namespace nlohmann + + +namespace nlohmann +{ +namespace detail +{ +/////////////////// +// binary writer // +/////////////////// + +/*! +@brief serialization to CBOR and MessagePack values +*/ +template +class binary_writer +{ + using string_t = typename BasicJsonType::string_t; + + public: + /*! + @brief create a binary writer + + @param[in] adapter output adapter to write to + */ + explicit binary_writer(output_adapter_t adapter) : oa(adapter) + { + assert(oa); } /*! - @brief convert any value type to a JSON value - - This function is usually called by the constructors of the @ref basic_json - class. - - @param[in,out] j JSON value to write to - @param[in] val value to read from + @param[in] j JSON value to serialize + @pre j.type() == value_t::object */ - template - static auto to_json(BasicJsonType& j, ValueType&& val) noexcept( - noexcept(::nlohmann::to_json(j, std::forward(val)))) - -> decltype(::nlohmann::to_json(j, std::forward(val)), void()) + void write_bson(const BasicJsonType& j) { - ::nlohmann::to_json(j, std::forward(val)); + switch (j.type()) + { + case value_t::object: + { + write_bson_object(*j.m_value.object); + break; + } + + default: + { + JSON_THROW(type_error::create(317, "to serialize to BSON, top-level type must be object, but is " + std::string(j.type_name()))); + } + } + } + + /*! + @param[in] j JSON value to serialize + */ + void write_cbor(const BasicJsonType& j) + { + switch (j.type()) + { + case value_t::null: + { + oa->write_character(to_char_type(0xF6)); + break; + } + + case value_t::boolean: + { + oa->write_character(j.m_value.boolean + ? to_char_type(0xF5) + : to_char_type(0xF4)); + break; + } + + case value_t::number_integer: + { + if (j.m_value.number_integer >= 0) + { + // CBOR does not differentiate between positive signed + // integers and unsigned integers. Therefore, we used the + // code from the value_t::number_unsigned case here. + if (j.m_value.number_integer <= 0x17) + { + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_integer <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x18)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_integer <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x19)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_integer <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x1A)); + write_number(static_cast(j.m_value.number_integer)); + } + else + { + oa->write_character(to_char_type(0x1B)); + write_number(static_cast(j.m_value.number_integer)); + } + } + else + { + // The conversions below encode the sign in the first + // byte, and the value is converted to a positive number. 
+ const auto positive_number = -1 - j.m_value.number_integer; + if (j.m_value.number_integer >= -24) + { + write_number(static_cast(0x20 + positive_number)); + } + else if (positive_number <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x38)); + write_number(static_cast(positive_number)); + } + else if (positive_number <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x39)); + write_number(static_cast(positive_number)); + } + else if (positive_number <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x3A)); + write_number(static_cast(positive_number)); + } + else + { + oa->write_character(to_char_type(0x3B)); + write_number(static_cast(positive_number)); + } + } + break; + } + + case value_t::number_unsigned: + { + if (j.m_value.number_unsigned <= 0x17) + { + write_number(static_cast(j.m_value.number_unsigned)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x18)); + write_number(static_cast(j.m_value.number_unsigned)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x19)); + write_number(static_cast(j.m_value.number_unsigned)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x1A)); + write_number(static_cast(j.m_value.number_unsigned)); + } + else + { + oa->write_character(to_char_type(0x1B)); + write_number(static_cast(j.m_value.number_unsigned)); + } + break; + } + + case value_t::number_float: + { + oa->write_character(get_cbor_float_prefix(j.m_value.number_float)); + write_number(j.m_value.number_float); + break; + } + + case value_t::string: + { + // step 1: write control byte and the string length + const auto N = j.m_value.string->size(); + if (N <= 0x17) + { + write_number(static_cast(0x60 + N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x78)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x79)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x7A)); + write_number(static_cast(N)); + } + // LCOV_EXCL_START + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x7B)); + write_number(static_cast(N)); + } + // LCOV_EXCL_STOP + + // step 2: write the string + oa->write_characters( + reinterpret_cast(j.m_value.string->c_str()), + j.m_value.string->size()); + break; + } + + case value_t::array: + { + // step 1: write control byte and the array size + const auto N = j.m_value.array->size(); + if (N <= 0x17) + { + write_number(static_cast(0x80 + N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x98)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x99)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x9A)); + write_number(static_cast(N)); + } + // LCOV_EXCL_START + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x9B)); + write_number(static_cast(N)); + } + // LCOV_EXCL_STOP + + // step 2: write each element + for (const auto& el : *j.m_value.array) + { + write_cbor(el); + } + break; + } + + case value_t::object: + { + // step 1: write control byte and the object size + 
const auto N = j.m_value.object->size(); + if (N <= 0x17) + { + write_number(static_cast(0xA0 + N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0xB8)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0xB9)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0xBA)); + write_number(static_cast(N)); + } + // LCOV_EXCL_START + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0xBB)); + write_number(static_cast(N)); + } + // LCOV_EXCL_STOP + + // step 2: write each element + for (const auto& el : *j.m_value.object) + { + write_cbor(el.first); + write_cbor(el.second); + } + break; + } + + default: + break; + } + } + + /*! + @param[in] j JSON value to serialize + */ + void write_msgpack(const BasicJsonType& j) + { + switch (j.type()) + { + case value_t::null: // nil + { + oa->write_character(to_char_type(0xC0)); + break; + } + + case value_t::boolean: // true and false + { + oa->write_character(j.m_value.boolean + ? to_char_type(0xC3) + : to_char_type(0xC2)); + break; + } + + case value_t::number_integer: + { + if (j.m_value.number_integer >= 0) + { + // MessagePack does not differentiate between positive + // signed integers and unsigned integers. Therefore, we used + // the code from the value_t::number_unsigned case here. + if (j.m_value.number_unsigned < 128) + { + // positive fixnum + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + // uint 8 + oa->write_character(to_char_type(0xCC)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + // uint 16 + oa->write_character(to_char_type(0xCD)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + // uint 32 + oa->write_character(to_char_type(0xCE)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + // uint 64 + oa->write_character(to_char_type(0xCF)); + write_number(static_cast(j.m_value.number_integer)); + } + } + else + { + if (j.m_value.number_integer >= -32) + { + // negative fixnum + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_integer >= (std::numeric_limits::min)() and + j.m_value.number_integer <= (std::numeric_limits::max)()) + { + // int 8 + oa->write_character(to_char_type(0xD0)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_integer >= (std::numeric_limits::min)() and + j.m_value.number_integer <= (std::numeric_limits::max)()) + { + // int 16 + oa->write_character(to_char_type(0xD1)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_integer >= (std::numeric_limits::min)() and + j.m_value.number_integer <= (std::numeric_limits::max)()) + { + // int 32 + oa->write_character(to_char_type(0xD2)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_integer >= (std::numeric_limits::min)() and + j.m_value.number_integer <= (std::numeric_limits::max)()) + { + // int 64 + oa->write_character(to_char_type(0xD3)); + write_number(static_cast(j.m_value.number_integer)); + } + } + break; + } + + case value_t::number_unsigned: + { + if 
(j.m_value.number_unsigned < 128) + { + // positive fixnum + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + // uint 8 + oa->write_character(to_char_type(0xCC)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + // uint 16 + oa->write_character(to_char_type(0xCD)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + // uint 32 + oa->write_character(to_char_type(0xCE)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + // uint 64 + oa->write_character(to_char_type(0xCF)); + write_number(static_cast(j.m_value.number_integer)); + } + break; + } + + case value_t::number_float: + { + oa->write_character(get_msgpack_float_prefix(j.m_value.number_float)); + write_number(j.m_value.number_float); + break; + } + + case value_t::string: + { + // step 1: write control byte and the string length + const auto N = j.m_value.string->size(); + if (N <= 31) + { + // fixstr + write_number(static_cast(0xA0 | N)); + } + else if (N <= (std::numeric_limits::max)()) + { + // str 8 + oa->write_character(to_char_type(0xD9)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + // str 16 + oa->write_character(to_char_type(0xDA)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + // str 32 + oa->write_character(to_char_type(0xDB)); + write_number(static_cast(N)); + } + + // step 2: write the string + oa->write_characters( + reinterpret_cast(j.m_value.string->c_str()), + j.m_value.string->size()); + break; + } + + case value_t::array: + { + // step 1: write control byte and the array size + const auto N = j.m_value.array->size(); + if (N <= 15) + { + // fixarray + write_number(static_cast(0x90 | N)); + } + else if (N <= (std::numeric_limits::max)()) + { + // array 16 + oa->write_character(to_char_type(0xDC)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + // array 32 + oa->write_character(to_char_type(0xDD)); + write_number(static_cast(N)); + } + + // step 2: write each element + for (const auto& el : *j.m_value.array) + { + write_msgpack(el); + } + break; + } + + case value_t::object: + { + // step 1: write control byte and the object size + const auto N = j.m_value.object->size(); + if (N <= 15) + { + // fixmap + write_number(static_cast(0x80 | (N & 0xF))); + } + else if (N <= (std::numeric_limits::max)()) + { + // map 16 + oa->write_character(to_char_type(0xDE)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + // map 32 + oa->write_character(to_char_type(0xDF)); + write_number(static_cast(N)); + } + + // step 2: write each element + for (const auto& el : *j.m_value.object) + { + write_msgpack(el.first); + write_msgpack(el.second); + } + break; + } + + default: + break; + } + } + + /*! 
+ @param[in] j JSON value to serialize + @param[in] use_count whether to use '#' prefixes (optimized format) + @param[in] use_type whether to use '$' prefixes (optimized format) + @param[in] add_prefix whether prefixes need to be used for this value + */ + void write_ubjson(const BasicJsonType& j, const bool use_count, + const bool use_type, const bool add_prefix = true) + { + switch (j.type()) + { + case value_t::null: + { + if (add_prefix) + { + oa->write_character(to_char_type('Z')); + } + break; + } + + case value_t::boolean: + { + if (add_prefix) + { + oa->write_character(j.m_value.boolean + ? to_char_type('T') + : to_char_type('F')); + } + break; + } + + case value_t::number_integer: + { + write_number_with_ubjson_prefix(j.m_value.number_integer, add_prefix); + break; + } + + case value_t::number_unsigned: + { + write_number_with_ubjson_prefix(j.m_value.number_unsigned, add_prefix); + break; + } + + case value_t::number_float: + { + write_number_with_ubjson_prefix(j.m_value.number_float, add_prefix); + break; + } + + case value_t::string: + { + if (add_prefix) + { + oa->write_character(to_char_type('S')); + } + write_number_with_ubjson_prefix(j.m_value.string->size(), true); + oa->write_characters( + reinterpret_cast(j.m_value.string->c_str()), + j.m_value.string->size()); + break; + } + + case value_t::array: + { + if (add_prefix) + { + oa->write_character(to_char_type('[')); + } + + bool prefix_required = true; + if (use_type and not j.m_value.array->empty()) + { + assert(use_count); + const CharType first_prefix = ubjson_prefix(j.front()); + const bool same_prefix = std::all_of(j.begin() + 1, j.end(), + [this, first_prefix](const BasicJsonType & v) + { + return ubjson_prefix(v) == first_prefix; + }); + + if (same_prefix) + { + prefix_required = false; + oa->write_character(to_char_type('$')); + oa->write_character(first_prefix); + } + } + + if (use_count) + { + oa->write_character(to_char_type('#')); + write_number_with_ubjson_prefix(j.m_value.array->size(), true); + } + + for (const auto& el : *j.m_value.array) + { + write_ubjson(el, use_count, use_type, prefix_required); + } + + if (not use_count) + { + oa->write_character(to_char_type(']')); + } + + break; + } + + case value_t::object: + { + if (add_prefix) + { + oa->write_character(to_char_type('{')); + } + + bool prefix_required = true; + if (use_type and not j.m_value.object->empty()) + { + assert(use_count); + const CharType first_prefix = ubjson_prefix(j.front()); + const bool same_prefix = std::all_of(j.begin(), j.end(), + [this, first_prefix](const BasicJsonType & v) + { + return ubjson_prefix(v) == first_prefix; + }); + + if (same_prefix) + { + prefix_required = false; + oa->write_character(to_char_type('$')); + oa->write_character(first_prefix); + } + } + + if (use_count) + { + oa->write_character(to_char_type('#')); + write_number_with_ubjson_prefix(j.m_value.object->size(), true); + } + + for (const auto& el : *j.m_value.object) + { + write_number_with_ubjson_prefix(el.first.size(), true); + oa->write_characters( + reinterpret_cast(el.first.c_str()), + el.first.size()); + write_ubjson(el.second, use_count, use_type, prefix_required); + } + + if (not use_count) + { + oa->write_character(to_char_type('}')); + } + + break; + } + + default: + break; + } + } + + private: + ////////// + // BSON // + ////////// + + /*! + @return The size of a BSON document entry header, including the id marker + and the entry name size (and its null-terminator). 
+ */ + static std::size_t calc_bson_entry_header_size(const string_t& name) + { + const auto it = name.find(static_cast(0)); + if (JSON_UNLIKELY(it != BasicJsonType::string_t::npos)) + { + JSON_THROW(out_of_range::create(409, + "BSON key cannot contain code point U+0000 (at byte " + std::to_string(it) + ")")); + } + + return /*id*/ 1ul + name.size() + /*zero-terminator*/1u; + } + + /*! + @brief Writes the given @a element_type and @a name to the output adapter + */ + void write_bson_entry_header(const string_t& name, + const std::uint8_t element_type) + { + oa->write_character(to_char_type(element_type)); // boolean + oa->write_characters( + reinterpret_cast(name.c_str()), + name.size() + 1u); + } + + /*! + @brief Writes a BSON element with key @a name and boolean value @a value + */ + void write_bson_boolean(const string_t& name, + const bool value) + { + write_bson_entry_header(name, 0x08); + oa->write_character(value ? to_char_type(0x01) : to_char_type(0x00)); + } + + /*! + @brief Writes a BSON element with key @a name and double value @a value + */ + void write_bson_double(const string_t& name, + const double value) + { + write_bson_entry_header(name, 0x01); + write_number(value); + } + + /*! + @return The size of the BSON-encoded string in @a value + */ + static std::size_t calc_bson_string_size(const string_t& value) + { + return sizeof(std::int32_t) + value.size() + 1ul; + } + + /*! + @brief Writes a BSON element with key @a name and string value @a value + */ + void write_bson_string(const string_t& name, + const string_t& value) + { + write_bson_entry_header(name, 0x02); + + write_number(static_cast(value.size() + 1ul)); + oa->write_characters( + reinterpret_cast(value.c_str()), + value.size() + 1); + } + + /*! + @brief Writes a BSON element with key @a name and null value + */ + void write_bson_null(const string_t& name) + { + write_bson_entry_header(name, 0x0A); + } + + /*! + @return The size of the BSON-encoded integer @a value + */ + static std::size_t calc_bson_integer_size(const std::int64_t value) + { + return (std::numeric_limits::min)() <= value and value <= (std::numeric_limits::max)() + ? sizeof(std::int32_t) + : sizeof(std::int64_t); + } + + /*! + @brief Writes a BSON element with key @a name and integer @a value + */ + void write_bson_integer(const string_t& name, + const std::int64_t value) + { + if ((std::numeric_limits::min)() <= value and value <= (std::numeric_limits::max)()) + { + write_bson_entry_header(name, 0x10); // int32 + write_number(static_cast(value)); + } + else + { + write_bson_entry_header(name, 0x12); // int64 + write_number(static_cast(value)); + } + } + + /*! + @return The size of the BSON-encoded unsigned integer in @a j + */ + static constexpr std::size_t calc_bson_unsigned_size(const std::uint64_t value) noexcept + { + return (value <= static_cast((std::numeric_limits::max)())) + ? sizeof(std::int32_t) + : sizeof(std::int64_t); + } + + /*! 
+ @brief Writes a BSON element with key @a name and unsigned @a value + */ + void write_bson_unsigned(const string_t& name, + const std::uint64_t value) + { + if (value <= static_cast((std::numeric_limits::max)())) + { + write_bson_entry_header(name, 0x10 /* int32 */); + write_number(static_cast(value)); + } + else if (value <= static_cast((std::numeric_limits::max)())) + { + write_bson_entry_header(name, 0x12 /* int64 */); + write_number(static_cast(value)); + } + else + { + JSON_THROW(out_of_range::create(407, "integer number " + std::to_string(value) + " cannot be represented by BSON as it does not fit int64")); + } + } + + /*! + @brief Writes a BSON element with key @a name and object @a value + */ + void write_bson_object_entry(const string_t& name, + const typename BasicJsonType::object_t& value) + { + write_bson_entry_header(name, 0x03); // object + write_bson_object(value); + } + + /*! + @return The size of the BSON-encoded array @a value + */ + static std::size_t calc_bson_array_size(const typename BasicJsonType::array_t& value) + { + std::size_t embedded_document_size = 0ul; + std::size_t array_index = 0ul; + + for (const auto& el : value) + { + embedded_document_size += calc_bson_element_size(std::to_string(array_index++), el); + } + + return sizeof(std::int32_t) + embedded_document_size + 1ul; + } + + /*! + @brief Writes a BSON element with key @a name and array @a value + */ + void write_bson_array(const string_t& name, + const typename BasicJsonType::array_t& value) + { + write_bson_entry_header(name, 0x04); // array + write_number(static_cast(calc_bson_array_size(value))); + + std::size_t array_index = 0ul; + + for (const auto& el : value) + { + write_bson_element(std::to_string(array_index++), el); + } + + oa->write_character(to_char_type(0x00)); + } + + /*! + @brief Calculates the size necessary to serialize the JSON value @a j with its @a name + @return The calculated size for the BSON document entry for @a j with the given @a name. + */ + static std::size_t calc_bson_element_size(const string_t& name, + const BasicJsonType& j) + { + const auto header_size = calc_bson_entry_header_size(name); + switch (j.type()) + { + case value_t::object: + return header_size + calc_bson_object_size(*j.m_value.object); + + case value_t::array: + return header_size + calc_bson_array_size(*j.m_value.array); + + case value_t::boolean: + return header_size + 1ul; + + case value_t::number_float: + return header_size + 8ul; + + case value_t::number_integer: + return header_size + calc_bson_integer_size(j.m_value.number_integer); + + case value_t::number_unsigned: + return header_size + calc_bson_unsigned_size(j.m_value.number_unsigned); + + case value_t::string: + return header_size + calc_bson_string_size(*j.m_value.string); + + case value_t::null: + return header_size + 0ul; + + // LCOV_EXCL_START + default: + assert(false); + return 0ul; + // LCOV_EXCL_STOP + } + } + + /*! + @brief Serializes the JSON value @a j to BSON and associates it with the + key @a name. 
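+
+ Sketch of the resulting bytes (for illustration): the element "n": 42
+ dispatches to write_bson_integer, which writes the type byte 0x10
+ (int32), the key bytes 'n' 0x00 and the little-endian value
+ 0x2A 0x00 0x00 0x00.
+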
+ @param name The name to associate with the JSON entity @a j within the + current BSON document + @return The size of the BSON entry + */ + void write_bson_element(const string_t& name, + const BasicJsonType& j) + { + switch (j.type()) + { + case value_t::object: + return write_bson_object_entry(name, *j.m_value.object); + + case value_t::array: + return write_bson_array(name, *j.m_value.array); + + case value_t::boolean: + return write_bson_boolean(name, j.m_value.boolean); + + case value_t::number_float: + return write_bson_double(name, j.m_value.number_float); + + case value_t::number_integer: + return write_bson_integer(name, j.m_value.number_integer); + + case value_t::number_unsigned: + return write_bson_unsigned(name, j.m_value.number_unsigned); + + case value_t::string: + return write_bson_string(name, *j.m_value.string); + + case value_t::null: + return write_bson_null(name); + + // LCOV_EXCL_START + default: + assert(false); + return; + // LCOV_EXCL_STOP + } + } + + /*! + @brief Calculates the size of the BSON serialization of the given + JSON-object @a j. + @param[in] j JSON value to serialize + @pre j.type() == value_t::object + */ + static std::size_t calc_bson_object_size(const typename BasicJsonType::object_t& value) + { + std::size_t document_size = std::accumulate(value.begin(), value.end(), 0ul, + [](size_t result, const typename BasicJsonType::object_t::value_type & el) + { + return result += calc_bson_element_size(el.first, el.second); + }); + + return sizeof(std::int32_t) + document_size + 1ul; + } + + /*! + @param[in] j JSON value to serialize + @pre j.type() == value_t::object + */ + void write_bson_object(const typename BasicJsonType::object_t& value) + { + write_number(static_cast(calc_bson_object_size(value))); + + for (const auto& el : value) + { + write_bson_element(el.first, el.second); + } + + oa->write_character(to_char_type(0x00)); + } + + ////////// + // CBOR // + ////////// + + static constexpr CharType get_cbor_float_prefix(float /*unused*/) + { + return to_char_type(0xFA); // Single-Precision Float + } + + static constexpr CharType get_cbor_float_prefix(double /*unused*/) + { + return to_char_type(0xFB); // Double-Precision Float + } + + ///////////// + // MsgPack // + ///////////// + + static constexpr CharType get_msgpack_float_prefix(float /*unused*/) + { + return to_char_type(0xCA); // float 32 + } + + static constexpr CharType get_msgpack_float_prefix(double /*unused*/) + { + return to_char_type(0xCB); // float 64 + } + + //////////// + // UBJSON // + //////////// + + // UBJSON: write number (floating point) + template::value, int>::type = 0> + void write_number_with_ubjson_prefix(const NumberType n, + const bool add_prefix) + { + if (add_prefix) + { + oa->write_character(get_ubjson_float_prefix(n)); + } + write_number(n); + } + + // UBJSON: write number (unsigned integer) + template::value, int>::type = 0> + void write_number_with_ubjson_prefix(const NumberType n, + const bool add_prefix) + { + if (n <= static_cast((std::numeric_limits::max)())) + { + if (add_prefix) + { + oa->write_character(to_char_type('i')); // int8 + } + write_number(static_cast(n)); + } + else if (n <= (std::numeric_limits::max)()) + { + if (add_prefix) + { + oa->write_character(to_char_type('U')); // uint8 + } + write_number(static_cast(n)); + } + else if (n <= static_cast((std::numeric_limits::max)())) + { + if (add_prefix) + { + oa->write_character(to_char_type('I')); // int16 + } + write_number(static_cast(n)); + } + else if (n <= 
static_cast((std::numeric_limits::max)())) + { + if (add_prefix) + { + oa->write_character(to_char_type('l')); // int32 + } + write_number(static_cast(n)); + } + else if (n <= static_cast((std::numeric_limits::max)())) + { + if (add_prefix) + { + oa->write_character(to_char_type('L')); // int64 + } + write_number(static_cast(n)); + } + else + { + JSON_THROW(out_of_range::create(407, "integer number " + std::to_string(n) + " cannot be represented by UBJSON as it does not fit int64")); + } + } + + // UBJSON: write number (signed integer) + template::value and + not std::is_floating_point::value, int>::type = 0> + void write_number_with_ubjson_prefix(const NumberType n, + const bool add_prefix) + { + if ((std::numeric_limits::min)() <= n and n <= (std::numeric_limits::max)()) + { + if (add_prefix) + { + oa->write_character(to_char_type('i')); // int8 + } + write_number(static_cast(n)); + } + else if (static_cast((std::numeric_limits::min)()) <= n and n <= static_cast((std::numeric_limits::max)())) + { + if (add_prefix) + { + oa->write_character(to_char_type('U')); // uint8 + } + write_number(static_cast(n)); + } + else if ((std::numeric_limits::min)() <= n and n <= (std::numeric_limits::max)()) + { + if (add_prefix) + { + oa->write_character(to_char_type('I')); // int16 + } + write_number(static_cast(n)); + } + else if ((std::numeric_limits::min)() <= n and n <= (std::numeric_limits::max)()) + { + if (add_prefix) + { + oa->write_character(to_char_type('l')); // int32 + } + write_number(static_cast(n)); + } + else if ((std::numeric_limits::min)() <= n and n <= (std::numeric_limits::max)()) + { + if (add_prefix) + { + oa->write_character(to_char_type('L')); // int64 + } + write_number(static_cast(n)); + } + // LCOV_EXCL_START + else + { + JSON_THROW(out_of_range::create(407, "integer number " + std::to_string(n) + " cannot be represented by UBJSON as it does not fit int64")); + } + // LCOV_EXCL_STOP + } + + /*! + @brief determine the type prefix of container values + + @note This function does not need to be 100% accurate when it comes to + integer limits. In case a number exceeds the limits of int64_t, + this will be detected by a later call to function + write_number_with_ubjson_prefix. Therefore, we return 'L' for any + value that does not fit the previous limits. + */ + CharType ubjson_prefix(const BasicJsonType& j) const noexcept + { + switch (j.type()) + { + case value_t::null: + return 'Z'; + + case value_t::boolean: + return j.m_value.boolean ? 
'T' : 'F'; + + case value_t::number_integer: + { + if ((std::numeric_limits::min)() <= j.m_value.number_integer and j.m_value.number_integer <= (std::numeric_limits::max)()) + { + return 'i'; + } + if ((std::numeric_limits::min)() <= j.m_value.number_integer and j.m_value.number_integer <= (std::numeric_limits::max)()) + { + return 'U'; + } + if ((std::numeric_limits::min)() <= j.m_value.number_integer and j.m_value.number_integer <= (std::numeric_limits::max)()) + { + return 'I'; + } + if ((std::numeric_limits::min)() <= j.m_value.number_integer and j.m_value.number_integer <= (std::numeric_limits::max)()) + { + return 'l'; + } + // no check and assume int64_t (see note above) + return 'L'; + } + + case value_t::number_unsigned: + { + if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + return 'i'; + } + if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + return 'U'; + } + if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + return 'I'; + } + if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + return 'l'; + } + // no check and assume int64_t (see note above) + return 'L'; + } + + case value_t::number_float: + return get_ubjson_float_prefix(j.m_value.number_float); + + case value_t::string: + return 'S'; + + case value_t::array: + return '['; + + case value_t::object: + return '{'; + + default: // discarded values + return 'N'; + } + } + + static constexpr CharType get_ubjson_float_prefix(float /*unused*/) + { + return 'd'; // float 32 + } + + static constexpr CharType get_ubjson_float_prefix(double /*unused*/) + { + return 'D'; // float 64 + } + + /////////////////////// + // Utility functions // + /////////////////////// + + /* + @brief write a number to output input + @param[in] n number of type @a NumberType + @tparam NumberType the type of the number + @tparam OutputIsLittleEndian Set to true if output data is + required to be little endian + + @note This function needs to respect the system's endianess, because bytes + in CBOR, MessagePack, and UBJSON are stored in network order (big + endian) and therefore need reordering on little endian systems. + */ + template + void write_number(const NumberType n) + { + // step 1: write number to array of length NumberType + std::array vec; + std::memcpy(vec.data(), &n, sizeof(NumberType)); + + // step 2: write array to output (with possible reordering) + if (is_little_endian != OutputIsLittleEndian) + { + // reverse byte order prior to conversion if necessary + std::reverse(vec.begin(), vec.end()); + } + + oa->write_characters(vec.data(), sizeof(NumberType)); + } + + public: + // The following to_char_type functions are implement the conversion + // between uint8_t and CharType. In case CharType is not unsigned, + // such a conversion is required to allow values greater than 128. + // See for a discussion. 
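+ //
+ // For illustration: if CharType is a signed char, to_char_type(0xEF)
+ // is expected to yield the char with the same bit pattern (-17 on a
+ // two's-complement platform), so bytes >= 0x80 can still be written.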
+ template < typename C = CharType, + enable_if_t < std::is_signed::value and std::is_signed::value > * = nullptr > + static constexpr CharType to_char_type(std::uint8_t x) noexcept + { + return *reinterpret_cast(&x); + } + + template < typename C = CharType, + enable_if_t < std::is_signed::value and std::is_unsigned::value > * = nullptr > + static CharType to_char_type(std::uint8_t x) noexcept + { + static_assert(sizeof(std::uint8_t) == sizeof(CharType), "size of CharType must be equal to std::uint8_t"); + static_assert(std::is_pod::value, "CharType must be POD"); + CharType result; + std::memcpy(&result, &x, sizeof(x)); + return result; + } + + template::value>* = nullptr> + static constexpr CharType to_char_type(std::uint8_t x) noexcept + { + return x; + } + + template < typename InputCharType, typename C = CharType, + enable_if_t < + std::is_signed::value and + std::is_signed::value and + std::is_same::type>::value + > * = nullptr > + static constexpr CharType to_char_type(InputCharType x) noexcept + { + return x; + } + + private: + /// whether we can assume little endianess + const bool is_little_endian = binary_reader::little_endianess(); + + /// the output + output_adapter_t oa = nullptr; +}; +} // namespace detail +} // namespace nlohmann + +// #include + +// #include + + +#include // reverse, remove, fill, find, none_of +#include // array +#include // assert +#include // and, or +#include // localeconv, lconv +#include // labs, isfinite, isnan, signbit +#include // size_t, ptrdiff_t +#include // uint8_t +#include // snprintf +#include // numeric_limits +#include // string +#include // is_same +#include // move + +// #include + + +#include // array +#include // assert +#include // or, and, not +#include // signbit, isfinite +#include // intN_t, uintN_t +#include // memcpy, memmove +#include // numeric_limits +#include // conditional + +namespace nlohmann +{ +namespace detail +{ + +/*! +@brief implements the Grisu2 algorithm for binary to decimal floating-point +conversion. + +This implementation is a slightly modified version of the reference +implementation which may be obtained from +http://florian.loitsch.com/publications (bench.tar.gz). + +The code is distributed under the MIT license, Copyright (c) 2009 Florian Loitsch. + +For a detailed description of the algorithm see: + +[1] Loitsch, "Printing Floating-Point Numbers Quickly and Accurately with + Integers", Proceedings of the ACM SIGPLAN 2010 Conference on Programming + Language Design and Implementation, PLDI 2010 +[2] Burger, Dybvig, "Printing Floating-Point Numbers Quickly and Accurately", + Proceedings of the ACM SIGPLAN 1996 Conference on Programming Language + Design and Implementation, PLDI 1996 +*/ +namespace dtoa_impl +{ + +template +Target reinterpret_bits(const Source source) +{ + static_assert(sizeof(Target) == sizeof(Source), "size mismatch"); + + Target target; + std::memcpy(&target, &source, sizeof(Source)); + return target; +} + +struct diyfp // f * 2^e +{ + static constexpr int kPrecision = 64; // = q + + std::uint64_t f = 0; + int e = 0; + + constexpr diyfp(std::uint64_t f_, int e_) noexcept : f(f_), e(e_) {} + + /*! + @brief returns x - y + @pre x.e == y.e and x.f >= y.f + */ + static diyfp sub(const diyfp& x, const diyfp& y) noexcept + { + assert(x.e == y.e); + assert(x.f >= y.f); + + return {x.f - y.f, x.e}; + } + + /*! + @brief returns x * y + @note The result is rounded. (Only the upper q bits are returned.) 
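+
+ For illustration: mul({2^63, 0}, {2^63, 0}) should return {2^62, 64},
+ i.e. the upper 64 bits of the 128-bit product together with the
+ exponent x.e + y.e + 64.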
+ */ + static diyfp mul(const diyfp& x, const diyfp& y) noexcept + { + static_assert(kPrecision == 64, "internal error"); + + // Computes: + // f = round((x.f * y.f) / 2^q) + // e = x.e + y.e + q + + // Emulate the 64-bit * 64-bit multiplication: + // + // p = u * v + // = (u_lo + 2^32 u_hi) (v_lo + 2^32 v_hi) + // = (u_lo v_lo ) + 2^32 ((u_lo v_hi ) + (u_hi v_lo )) + 2^64 (u_hi v_hi ) + // = (p0 ) + 2^32 ((p1 ) + (p2 )) + 2^64 (p3 ) + // = (p0_lo + 2^32 p0_hi) + 2^32 ((p1_lo + 2^32 p1_hi) + (p2_lo + 2^32 p2_hi)) + 2^64 (p3 ) + // = (p0_lo ) + 2^32 (p0_hi + p1_lo + p2_lo ) + 2^64 (p1_hi + p2_hi + p3) + // = (p0_lo ) + 2^32 (Q ) + 2^64 (H ) + // = (p0_lo ) + 2^32 (Q_lo + 2^32 Q_hi ) + 2^64 (H ) + // + // (Since Q might be larger than 2^32 - 1) + // + // = (p0_lo + 2^32 Q_lo) + 2^64 (Q_hi + H) + // + // (Q_hi + H does not overflow a 64-bit int) + // + // = p_lo + 2^64 p_hi + + const std::uint64_t u_lo = x.f & 0xFFFFFFFFu; + const std::uint64_t u_hi = x.f >> 32u; + const std::uint64_t v_lo = y.f & 0xFFFFFFFFu; + const std::uint64_t v_hi = y.f >> 32u; + + const std::uint64_t p0 = u_lo * v_lo; + const std::uint64_t p1 = u_lo * v_hi; + const std::uint64_t p2 = u_hi * v_lo; + const std::uint64_t p3 = u_hi * v_hi; + + const std::uint64_t p0_hi = p0 >> 32u; + const std::uint64_t p1_lo = p1 & 0xFFFFFFFFu; + const std::uint64_t p1_hi = p1 >> 32u; + const std::uint64_t p2_lo = p2 & 0xFFFFFFFFu; + const std::uint64_t p2_hi = p2 >> 32u; + + std::uint64_t Q = p0_hi + p1_lo + p2_lo; + + // The full product might now be computed as + // + // p_hi = p3 + p2_hi + p1_hi + (Q >> 32) + // p_lo = p0_lo + (Q << 32) + // + // But in this particular case here, the full p_lo is not required. + // Effectively we only need to add the highest bit in p_lo to p_hi (and + // Q_hi + 1 does not overflow). + + Q += std::uint64_t{1} << (64u - 32u - 1u); // round, ties up + + const std::uint64_t h = p3 + p2_hi + p1_hi + (Q >> 32u); + + return {h, x.e + y.e + 64}; + } + + /*! + @brief normalize x such that the significand is >= 2^(q-1) + @pre x.f != 0 + */ + static diyfp normalize(diyfp x) noexcept + { + assert(x.f != 0); + + while ((x.f >> 63u) == 0) + { + x.f <<= 1u; + x.e--; + } + + return x; + } + + /*! + @brief normalize x such that the result has the exponent E + @pre e >= x.e and the upper e - x.e bits of x.f must be zero. + */ + static diyfp normalize_to(const diyfp& x, const int target_exponent) noexcept + { + const int delta = x.e - target_exponent; + + assert(delta >= 0); + assert(((x.f << delta) >> delta) == x.f); + + return {x.f << delta, target_exponent}; } }; +struct boundaries +{ + diyfp w; + diyfp minus; + diyfp plus; +}; + +/*! +Compute the (normalized) diyfp representing the input number 'value' and its +boundaries. + +@pre value must be finite and positive +*/ +template +boundaries compute_boundaries(FloatType value) +{ + assert(std::isfinite(value)); + assert(value > 0); + + // Convert the IEEE representation into a diyfp. 
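+ //
+ // Worked example (sketch): for value = 1.0 the bit pattern is
+ // 0x3FF0000000000000, so E = 1023 and F = 0; this yields
+ // v = { f = 2^52, e = -52 }, which normalize() then turns into
+ // { f = 2^63, e = -63 }.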
+ // + // If v is denormal: + // value = 0.F * 2^(1 - bias) = ( F) * 2^(1 - bias - (p-1)) + // If v is normalized: + // value = 1.F * 2^(E - bias) = (2^(p-1) + F) * 2^(E - bias - (p-1)) + + static_assert(std::numeric_limits::is_iec559, + "internal error: dtoa_short requires an IEEE-754 floating-point implementation"); + + constexpr int kPrecision = std::numeric_limits::digits; // = p (includes the hidden bit) + constexpr int kBias = std::numeric_limits::max_exponent - 1 + (kPrecision - 1); + constexpr int kMinExp = 1 - kBias; + constexpr std::uint64_t kHiddenBit = std::uint64_t{1} << (kPrecision - 1); // = 2^(p-1) + + using bits_type = typename std::conditional::type; + + const std::uint64_t bits = reinterpret_bits(value); + const std::uint64_t E = bits >> (kPrecision - 1); + const std::uint64_t F = bits & (kHiddenBit - 1); + + const bool is_denormal = E == 0; + const diyfp v = is_denormal + ? diyfp(F, kMinExp) + : diyfp(F + kHiddenBit, static_cast(E) - kBias); + + // Compute the boundaries m- and m+ of the floating-point value + // v = f * 2^e. + // + // Determine v- and v+, the floating-point predecessor and successor if v, + // respectively. + // + // v- = v - 2^e if f != 2^(p-1) or e == e_min (A) + // = v - 2^(e-1) if f == 2^(p-1) and e > e_min (B) + // + // v+ = v + 2^e + // + // Let m- = (v- + v) / 2 and m+ = (v + v+) / 2. All real numbers _strictly_ + // between m- and m+ round to v, regardless of how the input rounding + // algorithm breaks ties. + // + // ---+-------------+-------------+-------------+-------------+--- (A) + // v- m- v m+ v+ + // + // -----------------+------+------+-------------+-------------+--- (B) + // v- m- v m+ v+ + + const bool lower_boundary_is_closer = F == 0 and E > 1; + const diyfp m_plus = diyfp(2 * v.f + 1, v.e - 1); + const diyfp m_minus = lower_boundary_is_closer + ? diyfp(4 * v.f - 1, v.e - 2) // (B) + : diyfp(2 * v.f - 1, v.e - 1); // (A) + + // Determine the normalized w+ = m+. + const diyfp w_plus = diyfp::normalize(m_plus); + + // Determine w- = m- such that e_(w-) = e_(w+). + const diyfp w_minus = diyfp::normalize_to(m_minus, w_plus.e); + + return {diyfp::normalize(v), w_minus, w_plus}; +} + +// Given normalized diyfp w, Grisu needs to find a (normalized) cached +// power-of-ten c, such that the exponent of the product c * w = f * 2^e lies +// within a certain range [alpha, gamma] (Definition 3.2 from [1]) +// +// alpha <= e = e_c + e_w + q <= gamma +// +// or +// +// f_c * f_w * 2^alpha <= f_c 2^(e_c) * f_w 2^(e_w) * 2^q +// <= f_c * f_w * 2^gamma +// +// Since c and w are normalized, i.e. 2^(q-1) <= f < 2^q, this implies +// +// 2^(q-1) * 2^(q-1) * 2^alpha <= c * w * 2^q < 2^q * 2^q * 2^gamma +// +// or +// +// 2^(q - 2 + alpha) <= c * w < 2^(q + gamma) +// +// The choice of (alpha,gamma) determines the size of the table and the form of +// the digit generation procedure. Using (alpha,gamma)=(-60,-32) works out well +// in practice: +// +// The idea is to cut the number c * w = f * 2^e into two parts, which can be +// processed independently: An integral part p1, and a fractional part p2: +// +// f * 2^e = ( (f div 2^-e) * 2^-e + (f mod 2^-e) ) * 2^e +// = (f div 2^-e) + (f mod 2^-e) * 2^e +// = p1 + p2 * 2^e +// +// The conversion of p1 into decimal form requires a series of divisions and +// modulos by (a power of) 10. These operations are faster for 32-bit than for +// 64-bit integers, so p1 should ideally fit into a 32-bit integer. 
This can be +// achieved by choosing +// +// -e >= 32 or e <= -32 := gamma +// +// In order to convert the fractional part +// +// p2 * 2^e = p2 / 2^-e = d[-1] / 10^1 + d[-2] / 10^2 + ... +// +// into decimal form, the fraction is repeatedly multiplied by 10 and the digits +// d[-i] are extracted in order: +// +// (10 * p2) div 2^-e = d[-1] +// (10 * p2) mod 2^-e = d[-2] / 10^1 + ... +// +// The multiplication by 10 must not overflow. It is sufficient to choose +// +// 10 * p2 < 16 * p2 = 2^4 * p2 <= 2^64. +// +// Since p2 = f mod 2^-e < 2^-e, +// +// -e <= 60 or e >= -60 := alpha + +constexpr int kAlpha = -60; +constexpr int kGamma = -32; + +struct cached_power // c = f * 2^e ~= 10^k +{ + std::uint64_t f; + int e; + int k; +}; + +/*! +For a normalized diyfp w = f * 2^e, this function returns a (normalized) cached +power-of-ten c = f_c * 2^e_c, such that the exponent of the product w * c +satisfies (Definition 3.2 from [1]) + + alpha <= e_c + e + q <= gamma. +*/ +inline cached_power get_cached_power_for_binary_exponent(int e) +{ + // Now + // + // alpha <= e_c + e + q <= gamma (1) + // ==> f_c * 2^alpha <= c * 2^e * 2^q + // + // and since the c's are normalized, 2^(q-1) <= f_c, + // + // ==> 2^(q - 1 + alpha) <= c * 2^(e + q) + // ==> 2^(alpha - e - 1) <= c + // + // If c were an exakt power of ten, i.e. c = 10^k, one may determine k as + // + // k = ceil( log_10( 2^(alpha - e - 1) ) ) + // = ceil( (alpha - e - 1) * log_10(2) ) + // + // From the paper: + // "In theory the result of the procedure could be wrong since c is rounded, + // and the computation itself is approximated [...]. In practice, however, + // this simple function is sufficient." + // + // For IEEE double precision floating-point numbers converted into + // normalized diyfp's w = f * 2^e, with q = 64, + // + // e >= -1022 (min IEEE exponent) + // -52 (p - 1) + // -52 (p - 1, possibly normalize denormal IEEE numbers) + // -11 (normalize the diyfp) + // = -1137 + // + // and + // + // e <= +1023 (max IEEE exponent) + // -52 (p - 1) + // -11 (normalize the diyfp) + // = 960 + // + // This binary exponent range [-1137,960] results in a decimal exponent + // range [-307,324]. One does not need to store a cached power for each + // k in this range. For each such k it suffices to find a cached power + // such that the exponent of the product lies in [alpha,gamma]. + // This implies that the difference of the decimal exponents of adjacent + // table entries must be less than or equal to + // + // floor( (gamma - alpha) * log_10(2) ) = 8. + // + // (A smaller distance gamma-alpha would require a larger table.) + + // NB: + // Actually this function returns c, such that -60 <= e_c + e + 64 <= -34. 
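+ //
+ // For illustration: for e = -63 (the exponent produced for 1.0 after
+ // normalization) this is expected to select the entry
+ // { 0x9C40000000000000, -50, 4 }, i.e. c ~= 10^4, and indeed
+ // -50 + (-63) + 64 = -49 lies in [kAlpha, kGamma].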
+ + constexpr int kCachedPowersMinDecExp = -300; + constexpr int kCachedPowersDecStep = 8; + + static constexpr std::array kCachedPowers = + { + { + { 0xAB70FE17C79AC6CA, -1060, -300 }, + { 0xFF77B1FCBEBCDC4F, -1034, -292 }, + { 0xBE5691EF416BD60C, -1007, -284 }, + { 0x8DD01FAD907FFC3C, -980, -276 }, + { 0xD3515C2831559A83, -954, -268 }, + { 0x9D71AC8FADA6C9B5, -927, -260 }, + { 0xEA9C227723EE8BCB, -901, -252 }, + { 0xAECC49914078536D, -874, -244 }, + { 0x823C12795DB6CE57, -847, -236 }, + { 0xC21094364DFB5637, -821, -228 }, + { 0x9096EA6F3848984F, -794, -220 }, + { 0xD77485CB25823AC7, -768, -212 }, + { 0xA086CFCD97BF97F4, -741, -204 }, + { 0xEF340A98172AACE5, -715, -196 }, + { 0xB23867FB2A35B28E, -688, -188 }, + { 0x84C8D4DFD2C63F3B, -661, -180 }, + { 0xC5DD44271AD3CDBA, -635, -172 }, + { 0x936B9FCEBB25C996, -608, -164 }, + { 0xDBAC6C247D62A584, -582, -156 }, + { 0xA3AB66580D5FDAF6, -555, -148 }, + { 0xF3E2F893DEC3F126, -529, -140 }, + { 0xB5B5ADA8AAFF80B8, -502, -132 }, + { 0x87625F056C7C4A8B, -475, -124 }, + { 0xC9BCFF6034C13053, -449, -116 }, + { 0x964E858C91BA2655, -422, -108 }, + { 0xDFF9772470297EBD, -396, -100 }, + { 0xA6DFBD9FB8E5B88F, -369, -92 }, + { 0xF8A95FCF88747D94, -343, -84 }, + { 0xB94470938FA89BCF, -316, -76 }, + { 0x8A08F0F8BF0F156B, -289, -68 }, + { 0xCDB02555653131B6, -263, -60 }, + { 0x993FE2C6D07B7FAC, -236, -52 }, + { 0xE45C10C42A2B3B06, -210, -44 }, + { 0xAA242499697392D3, -183, -36 }, + { 0xFD87B5F28300CA0E, -157, -28 }, + { 0xBCE5086492111AEB, -130, -20 }, + { 0x8CBCCC096F5088CC, -103, -12 }, + { 0xD1B71758E219652C, -77, -4 }, + { 0x9C40000000000000, -50, 4 }, + { 0xE8D4A51000000000, -24, 12 }, + { 0xAD78EBC5AC620000, 3, 20 }, + { 0x813F3978F8940984, 30, 28 }, + { 0xC097CE7BC90715B3, 56, 36 }, + { 0x8F7E32CE7BEA5C70, 83, 44 }, + { 0xD5D238A4ABE98068, 109, 52 }, + { 0x9F4F2726179A2245, 136, 60 }, + { 0xED63A231D4C4FB27, 162, 68 }, + { 0xB0DE65388CC8ADA8, 189, 76 }, + { 0x83C7088E1AAB65DB, 216, 84 }, + { 0xC45D1DF942711D9A, 242, 92 }, + { 0x924D692CA61BE758, 269, 100 }, + { 0xDA01EE641A708DEA, 295, 108 }, + { 0xA26DA3999AEF774A, 322, 116 }, + { 0xF209787BB47D6B85, 348, 124 }, + { 0xB454E4A179DD1877, 375, 132 }, + { 0x865B86925B9BC5C2, 402, 140 }, + { 0xC83553C5C8965D3D, 428, 148 }, + { 0x952AB45CFA97A0B3, 455, 156 }, + { 0xDE469FBD99A05FE3, 481, 164 }, + { 0xA59BC234DB398C25, 508, 172 }, + { 0xF6C69A72A3989F5C, 534, 180 }, + { 0xB7DCBF5354E9BECE, 561, 188 }, + { 0x88FCF317F22241E2, 588, 196 }, + { 0xCC20CE9BD35C78A5, 614, 204 }, + { 0x98165AF37B2153DF, 641, 212 }, + { 0xE2A0B5DC971F303A, 667, 220 }, + { 0xA8D9D1535CE3B396, 694, 228 }, + { 0xFB9B7CD9A4A7443C, 720, 236 }, + { 0xBB764C4CA7A44410, 747, 244 }, + { 0x8BAB8EEFB6409C1A, 774, 252 }, + { 0xD01FEF10A657842C, 800, 260 }, + { 0x9B10A4E5E9913129, 827, 268 }, + { 0xE7109BFBA19C0C9D, 853, 276 }, + { 0xAC2820D9623BF429, 880, 284 }, + { 0x80444B5E7AA7CF85, 907, 292 }, + { 0xBF21E44003ACDD2D, 933, 300 }, + { 0x8E679C2F5E44FF8F, 960, 308 }, + { 0xD433179D9C8CB841, 986, 316 }, + { 0x9E19DB92B4E31BA9, 1013, 324 }, + } + }; + + // This computation gives exactly the same results for k as + // k = ceil((kAlpha - e - 1) * 0.30102999566398114) + // for |e| <= 1500, but doesn't require floating-point operations. 
+ // NB: log_10(2) ~= 78913 / 2^18 + assert(e >= -1500); + assert(e <= 1500); + const int f = kAlpha - e - 1; + const int k = (f * 78913) / (1 << 18) + static_cast(f > 0); + + const int index = (-kCachedPowersMinDecExp + k + (kCachedPowersDecStep - 1)) / kCachedPowersDecStep; + assert(index >= 0); + assert(static_cast(index) < kCachedPowers.size()); + + const cached_power cached = kCachedPowers[static_cast(index)]; + assert(kAlpha <= cached.e + e + 64); + assert(kGamma >= cached.e + e + 64); + + return cached; +} + +/*! +For n != 0, returns k, such that pow10 := 10^(k-1) <= n < 10^k. +For n == 0, returns 1 and sets pow10 := 1. +*/ +inline int find_largest_pow10(const std::uint32_t n, std::uint32_t& pow10) +{ + // LCOV_EXCL_START + if (n >= 1000000000) + { + pow10 = 1000000000; + return 10; + } + // LCOV_EXCL_STOP + else if (n >= 100000000) + { + pow10 = 100000000; + return 9; + } + else if (n >= 10000000) + { + pow10 = 10000000; + return 8; + } + else if (n >= 1000000) + { + pow10 = 1000000; + return 7; + } + else if (n >= 100000) + { + pow10 = 100000; + return 6; + } + else if (n >= 10000) + { + pow10 = 10000; + return 5; + } + else if (n >= 1000) + { + pow10 = 1000; + return 4; + } + else if (n >= 100) + { + pow10 = 100; + return 3; + } + else if (n >= 10) + { + pow10 = 10; + return 2; + } + else + { + pow10 = 1; + return 1; + } +} + +inline void grisu2_round(char* buf, int len, std::uint64_t dist, std::uint64_t delta, + std::uint64_t rest, std::uint64_t ten_k) +{ + assert(len >= 1); + assert(dist <= delta); + assert(rest <= delta); + assert(ten_k > 0); + + // <--------------------------- delta ----> + // <---- dist ---------> + // --------------[------------------+-------------------]-------------- + // M- w M+ + // + // ten_k + // <------> + // <---- rest ----> + // --------------[------------------+----+--------------]-------------- + // w V + // = buf * 10^k + // + // ten_k represents a unit-in-the-last-place in the decimal representation + // stored in buf. + // Decrement buf by ten_k while this takes buf closer to w. + + // The tests are written in this order to avoid overflow in unsigned + // integer arithmetic. + + while (rest < dist + and delta - rest >= ten_k + and (rest + ten_k < dist or dist - rest > rest + ten_k - dist)) + { + assert(buf[len - 1] != '0'); + buf[len - 1]--; + rest += ten_k; + } +} + +/*! +Generates V = buffer * 10^decimal_exponent, such that M- <= V <= M+. +M- and M+ must be normalized and share the same exponent -60 <= e <= -32. +*/ +inline void grisu2_digit_gen(char* buffer, int& length, int& decimal_exponent, + diyfp M_minus, diyfp w, diyfp M_plus) +{ + static_assert(kAlpha >= -60, "internal error"); + static_assert(kGamma <= -32, "internal error"); + + // Generates the digits (and the exponent) of a decimal floating-point + // number V = buffer * 10^decimal_exponent in the range [M-, M+]. The diyfp's + // w, M- and M+ share the same exponent e, which satisfies alpha <= e <= gamma. + // + // <--------------------------- delta ----> + // <---- dist ---------> + // --------------[------------------+-------------------]-------------- + // M- w M+ + // + // Grisu2 generates the digits of M+ from left to right and stops as soon as + // V is in [M-,M+]. 
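+ //
+ // Worked example (sketch): for value = 1.0, w, M- and M+ have been
+ // scaled by c ~= 10^4, so the integral part p1 is about 10000. The
+ // first digit '1' already brings the remainder below delta, so the
+ // buffer becomes "1" and the exponent is adjusted such that
+ // V = 1 * 10^0.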
+ + assert(M_plus.e >= kAlpha); + assert(M_plus.e <= kGamma); + + std::uint64_t delta = diyfp::sub(M_plus, M_minus).f; // (significand of (M+ - M-), implicit exponent is e) + std::uint64_t dist = diyfp::sub(M_plus, w ).f; // (significand of (M+ - w ), implicit exponent is e) + + // Split M+ = f * 2^e into two parts p1 and p2 (note: e < 0): + // + // M+ = f * 2^e + // = ((f div 2^-e) * 2^-e + (f mod 2^-e)) * 2^e + // = ((p1 ) * 2^-e + (p2 )) * 2^e + // = p1 + p2 * 2^e + + const diyfp one(std::uint64_t{1} << -M_plus.e, M_plus.e); + + auto p1 = static_cast(M_plus.f >> -one.e); // p1 = f div 2^-e (Since -e >= 32, p1 fits into a 32-bit int.) + std::uint64_t p2 = M_plus.f & (one.f - 1); // p2 = f mod 2^-e + + // 1) + // + // Generate the digits of the integral part p1 = d[n-1]...d[1]d[0] + + assert(p1 > 0); + + std::uint32_t pow10; + const int k = find_largest_pow10(p1, pow10); + + // 10^(k-1) <= p1 < 10^k, pow10 = 10^(k-1) + // + // p1 = (p1 div 10^(k-1)) * 10^(k-1) + (p1 mod 10^(k-1)) + // = (d[k-1] ) * 10^(k-1) + (p1 mod 10^(k-1)) + // + // M+ = p1 + p2 * 2^e + // = d[k-1] * 10^(k-1) + (p1 mod 10^(k-1)) + p2 * 2^e + // = d[k-1] * 10^(k-1) + ((p1 mod 10^(k-1)) * 2^-e + p2) * 2^e + // = d[k-1] * 10^(k-1) + ( rest) * 2^e + // + // Now generate the digits d[n] of p1 from left to right (n = k-1,...,0) + // + // p1 = d[k-1]...d[n] * 10^n + d[n-1]...d[0] + // + // but stop as soon as + // + // rest * 2^e = (d[n-1]...d[0] * 2^-e + p2) * 2^e <= delta * 2^e + + int n = k; + while (n > 0) + { + // Invariants: + // M+ = buffer * 10^n + (p1 + p2 * 2^e) (buffer = 0 for n = k) + // pow10 = 10^(n-1) <= p1 < 10^n + // + const std::uint32_t d = p1 / pow10; // d = p1 div 10^(n-1) + const std::uint32_t r = p1 % pow10; // r = p1 mod 10^(n-1) + // + // M+ = buffer * 10^n + (d * 10^(n-1) + r) + p2 * 2^e + // = (buffer * 10 + d) * 10^(n-1) + (r + p2 * 2^e) + // + assert(d <= 9); + buffer[length++] = static_cast('0' + d); // buffer := buffer * 10 + d + // + // M+ = buffer * 10^(n-1) + (r + p2 * 2^e) + // + p1 = r; + n--; + // + // M+ = buffer * 10^n + (p1 + p2 * 2^e) + // pow10 = 10^n + // + + // Now check if enough digits have been generated. + // Compute + // + // p1 + p2 * 2^e = (p1 * 2^-e + p2) * 2^e = rest * 2^e + // + // Note: + // Since rest and delta share the same exponent e, it suffices to + // compare the significands. + const std::uint64_t rest = (std::uint64_t{p1} << -one.e) + p2; + if (rest <= delta) + { + // V = buffer * 10^n, with M- <= V <= M+. + + decimal_exponent += n; + + // We may now just stop. But instead look if the buffer could be + // decremented to bring V closer to w. + // + // pow10 = 10^n is now 1 ulp in the decimal representation V. + // The rounding procedure works with diyfp's with an implicit + // exponent of e. + // + // 10^n = (10^n * 2^-e) * 2^e = ulp * 2^e + // + const std::uint64_t ten_n = std::uint64_t{pow10} << -one.e; + grisu2_round(buffer, length, dist, delta, rest, ten_n); + + return; + } + + pow10 /= 10; + // + // pow10 = 10^(n-1) <= p1 < 10^n + // Invariants restored. + } + + // 2) + // + // The digits of the integral part have been generated: + // + // M+ = d[k-1]...d[1]d[0] + p2 * 2^e + // = buffer + p2 * 2^e + // + // Now generate the digits of the fractional part p2 * 2^e. + // + // Note: + // No decimal point is generated: the exponent is adjusted instead. + // + // p2 actually represents the fraction + // + // p2 * 2^e + // = p2 / 2^-e + // = d[-1] / 10^1 + d[-2] / 10^2 + ... + // + // Now generate the digits d[-m] of p1 from left to right (m = 1,2,...) 
+ // + // p2 * 2^e = d[-1]d[-2]...d[-m] * 10^-m + // + 10^-m * (d[-m-1] / 10^1 + d[-m-2] / 10^2 + ...) + // + // using + // + // 10^m * p2 = ((10^m * p2) div 2^-e) * 2^-e + ((10^m * p2) mod 2^-e) + // = ( d) * 2^-e + ( r) + // + // or + // 10^m * p2 * 2^e = d + r * 2^e + // + // i.e. + // + // M+ = buffer + p2 * 2^e + // = buffer + 10^-m * (d + r * 2^e) + // = (buffer * 10^m + d) * 10^-m + 10^-m * r * 2^e + // + // and stop as soon as 10^-m * r * 2^e <= delta * 2^e + + assert(p2 > delta); + + int m = 0; + for (;;) + { + // Invariant: + // M+ = buffer * 10^-m + 10^-m * (d[-m-1] / 10 + d[-m-2] / 10^2 + ...) * 2^e + // = buffer * 10^-m + 10^-m * (p2 ) * 2^e + // = buffer * 10^-m + 10^-m * (1/10 * (10 * p2) ) * 2^e + // = buffer * 10^-m + 10^-m * (1/10 * ((10*p2 div 2^-e) * 2^-e + (10*p2 mod 2^-e)) * 2^e + // + assert(p2 <= (std::numeric_limits::max)() / 10); + p2 *= 10; + const std::uint64_t d = p2 >> -one.e; // d = (10 * p2) div 2^-e + const std::uint64_t r = p2 & (one.f - 1); // r = (10 * p2) mod 2^-e + // + // M+ = buffer * 10^-m + 10^-m * (1/10 * (d * 2^-e + r) * 2^e + // = buffer * 10^-m + 10^-m * (1/10 * (d + r * 2^e)) + // = (buffer * 10 + d) * 10^(-m-1) + 10^(-m-1) * r * 2^e + // + assert(d <= 9); + buffer[length++] = static_cast('0' + d); // buffer := buffer * 10 + d + // + // M+ = buffer * 10^(-m-1) + 10^(-m-1) * r * 2^e + // + p2 = r; + m++; + // + // M+ = buffer * 10^-m + 10^-m * p2 * 2^e + // Invariant restored. + + // Check if enough digits have been generated. + // + // 10^-m * p2 * 2^e <= delta * 2^e + // p2 * 2^e <= 10^m * delta * 2^e + // p2 <= 10^m * delta + delta *= 10; + dist *= 10; + if (p2 <= delta) + { + break; + } + } + + // V = buffer * 10^-m, with M- <= V <= M+. + + decimal_exponent -= m; + + // 1 ulp in the decimal representation is now 10^-m. + // Since delta and dist are now scaled by 10^m, we need to do the + // same with ulp in order to keep the units in sync. + // + // 10^m * 10^-m = 1 = 2^-e * 2^e = ten_m * 2^e + // + const std::uint64_t ten_m = one.f; + grisu2_round(buffer, length, dist, delta, p2, ten_m); + + // By construction this algorithm generates the shortest possible decimal + // number (Loitsch, Theorem 6.2) which rounds back to w. + // For an input number of precision p, at least + // + // N = 1 + ceil(p * log_10(2)) + // + // decimal digits are sufficient to identify all binary floating-point + // numbers (Matula, "In-and-Out conversions"). + // This implies that the algorithm does not produce more than N decimal + // digits. + // + // N = 17 for p = 53 (IEEE double precision) + // N = 9 for p = 24 (IEEE single precision) +} + +/*! +v = buf * 10^decimal_exponent +len is the length of the buffer (number of decimal digits) +The buffer must be large enough, i.e. >= max_digits10. +*/ +inline void grisu2(char* buf, int& len, int& decimal_exponent, + diyfp m_minus, diyfp v, diyfp m_plus) +{ + assert(m_plus.e == m_minus.e); + assert(m_plus.e == v.e); + + // --------(-----------------------+-----------------------)-------- (A) + // m- v m+ + // + // --------------------(-----------+-----------------------)-------- (B) + // m- v m+ + // + // First scale v (and m- and m+) such that the exponent is in the range + // [alpha, gamma]. 
+ + const cached_power cached = get_cached_power_for_binary_exponent(m_plus.e); + + const diyfp c_minus_k(cached.f, cached.e); // = c ~= 10^-k + + // The exponent of the products is = v.e + c_minus_k.e + q and is in the range [alpha,gamma] + const diyfp w = diyfp::mul(v, c_minus_k); + const diyfp w_minus = diyfp::mul(m_minus, c_minus_k); + const diyfp w_plus = diyfp::mul(m_plus, c_minus_k); + + // ----(---+---)---------------(---+---)---------------(---+---)---- + // w- w w+ + // = c*m- = c*v = c*m+ + // + // diyfp::mul rounds its result and c_minus_k is approximated too. w, w- and + // w+ are now off by a small amount. + // In fact: + // + // w - v * 10^k < 1 ulp + // + // To account for this inaccuracy, add resp. subtract 1 ulp. + // + // --------+---[---------------(---+---)---------------]---+-------- + // w- M- w M+ w+ + // + // Now any number in [M-, M+] (bounds included) will round to w when input, + // regardless of how the input rounding algorithm breaks ties. + // + // And digit_gen generates the shortest possible such number in [M-, M+]. + // Note that this does not mean that Grisu2 always generates the shortest + // possible number in the interval (m-, m+). + const diyfp M_minus(w_minus.f + 1, w_minus.e); + const diyfp M_plus (w_plus.f - 1, w_plus.e ); + + decimal_exponent = -cached.k; // = -(-k) = k + + grisu2_digit_gen(buf, len, decimal_exponent, M_minus, w, M_plus); +} + +/*! +v = buf * 10^decimal_exponent +len is the length of the buffer (number of decimal digits) +The buffer must be large enough, i.e. >= max_digits10. +*/ +template +void grisu2(char* buf, int& len, int& decimal_exponent, FloatType value) +{ + static_assert(diyfp::kPrecision >= std::numeric_limits::digits + 3, + "internal error: not enough precision"); + + assert(std::isfinite(value)); + assert(value > 0); + + // If the neighbors (and boundaries) of 'value' are always computed for double-precision + // numbers, all float's can be recovered using strtod (and strtof). However, the resulting + // decimal representations are not exactly "short". + // + // The documentation for 'std::to_chars' (https://en.cppreference.com/w/cpp/utility/to_chars) + // says "value is converted to a string as if by std::sprintf in the default ("C") locale" + // and since sprintf promotes float's to double's, I think this is exactly what 'std::to_chars' + // does. + // On the other hand, the documentation for 'std::to_chars' requires that "parsing the + // representation using the corresponding std::from_chars function recovers value exactly". That + // indicates that single precision floating-point numbers should be recovered using + // 'std::strtof'. + // + // NB: If the neighbors are computed for single-precision numbers, there is a single float + // (7.0385307e-26f) which can't be recovered using strtod. The resulting double precision + // value is off by 1 ulp. +#if 0 + const boundaries w = compute_boundaries(static_cast(value)); +#else + const boundaries w = compute_boundaries(value); +#endif + + grisu2(buf, len, decimal_exponent, w.minus, w.w, w.plus); +} + +/*! +@brief appends a decimal representation of e to buf +@return a pointer to the element following the exponent. +@pre -1000 < e < 1000 +*/ +inline char* append_exponent(char* buf, int e) +{ + assert(e > -1000); + assert(e < 1000); + + if (e < 0) + { + e = -e; + *buf++ = '-'; + } + else + { + *buf++ = '+'; + } + + auto k = static_cast(e); + if (k < 10) + { + // Always print at least two digits in the exponent. + // This is for compatibility with printf("%g"). 
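+ // For example: e = 5 is emitted as "+05" rather than "+5".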
+ *buf++ = '0'; + *buf++ = static_cast('0' + k); + } + else if (k < 100) + { + *buf++ = static_cast('0' + k / 10); + k %= 10; + *buf++ = static_cast('0' + k); + } + else + { + *buf++ = static_cast('0' + k / 100); + k %= 100; + *buf++ = static_cast('0' + k / 10); + k %= 10; + *buf++ = static_cast('0' + k); + } + + return buf; +} + +/*! +@brief prettify v = buf * 10^decimal_exponent + +If v is in the range [10^min_exp, 10^max_exp) it will be printed in fixed-point +notation. Otherwise it will be printed in exponential notation. + +@pre min_exp < 0 +@pre max_exp > 0 +*/ +inline char* format_buffer(char* buf, int len, int decimal_exponent, + int min_exp, int max_exp) +{ + assert(min_exp < 0); + assert(max_exp > 0); + + const int k = len; + const int n = len + decimal_exponent; + + // v = buf * 10^(n-k) + // k is the length of the buffer (number of decimal digits) + // n is the position of the decimal point relative to the start of the buffer. + + if (k <= n and n <= max_exp) + { + // digits[000] + // len <= max_exp + 2 + + std::memset(buf + k, '0', static_cast(n - k)); + // Make it look like a floating-point number (#362, #378) + buf[n + 0] = '.'; + buf[n + 1] = '0'; + return buf + (n + 2); + } + + if (0 < n and n <= max_exp) + { + // dig.its + // len <= max_digits10 + 1 + + assert(k > n); + + std::memmove(buf + (n + 1), buf + n, static_cast(k - n)); + buf[n] = '.'; + return buf + (k + 1); + } + + if (min_exp < n and n <= 0) + { + // 0.[000]digits + // len <= 2 + (-min_exp - 1) + max_digits10 + + std::memmove(buf + (2 + -n), buf, static_cast(k)); + buf[0] = '0'; + buf[1] = '.'; + std::memset(buf + 2, '0', static_cast(-n)); + return buf + (2 + (-n) + k); + } + + if (k == 1) + { + // dE+123 + // len <= 1 + 5 + + buf += 1; + } + else + { + // d.igitsE+123 + // len <= max_digits10 + 1 + 5 + + std::memmove(buf + 2, buf + 1, static_cast(k - 1)); + buf[1] = '.'; + buf += 1 + k; + } + + *buf++ = 'e'; + return append_exponent(buf, n - 1); +} + +} // namespace dtoa_impl + +/*! +@brief generates a decimal representation of the floating-point number value in [first, last). + +The format of the resulting decimal representation is similar to printf's %g +format. Returns an iterator pointing past-the-end of the decimal representation. + +@note The input number must be finite, i.e. NaN's and Inf's are not supported. +@note The buffer must be large enough. +@note The result is NOT null-terminated. +*/ +template +char* to_chars(char* first, const char* last, FloatType value) +{ + static_cast(last); // maybe unused - fix warning + assert(std::isfinite(value)); + + // Use signbit(value) instead of (value < 0) since signbit works for -0. + if (std::signbit(value)) + { + value = -value; + *first++ = '-'; + } + + if (value == 0) // +-0 + { + *first++ = '0'; + // Make it look like a floating-point number (#362, #378) + *first++ = '.'; + *first++ = '0'; + return first; + } + + assert(last - first >= std::numeric_limits::max_digits10); + + // Compute v = buffer * 10^decimal_exponent. + // The decimal digits are stored in the buffer, which needs to be interpreted + // as an unsigned decimal integer. + // len is the length of the buffer, i.e. the number of decimal digits. + int len = 0; + int decimal_exponent = 0; + dtoa_impl::grisu2(first, len, decimal_exponent, value); + + assert(len <= std::numeric_limits::max_digits10); + + // Format the buffer like printf("%.*g", prec, value) + constexpr int kMinExp = -4; + // Use digits10 here to increase compatibility with version 2. 
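+ // For illustration: with kMinExp = -4 and kMaxExp = 15 (digits10 for
+ // double, declared just below), 0.0001 still prints in fixed notation
+ // ("0.0001") while 0.00001 switches to scientific notation ("1e-05").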
+ constexpr int kMaxExp = std::numeric_limits::digits10; + + assert(last - first >= kMaxExp + 2); + assert(last - first >= 2 + (-kMinExp - 1) + std::numeric_limits::max_digits10); + assert(last - first >= std::numeric_limits::max_digits10 + 6); + + return dtoa_impl::format_buffer(first, len, decimal_exponent, kMinExp, kMaxExp); +} + +} // namespace detail +} // namespace nlohmann + +// #include + +// #include + +// #include + +// #include + +// #include + +// #include + + +namespace nlohmann +{ +namespace detail +{ +/////////////////// +// serialization // +/////////////////// + +/// how to treat decoding errors +enum class error_handler_t +{ + strict, ///< throw a type_error exception in case of invalid UTF-8 + replace, ///< replace invalid UTF-8 sequences with U+FFFD + ignore ///< ignore invalid UTF-8 sequences +}; + +template +class serializer +{ + using string_t = typename BasicJsonType::string_t; + using number_float_t = typename BasicJsonType::number_float_t; + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + static constexpr std::uint8_t UTF8_ACCEPT = 0; + static constexpr std::uint8_t UTF8_REJECT = 1; + + public: + /*! + @param[in] s output stream to serialize to + @param[in] ichar indentation character to use + @param[in] error_handler_ how to react on decoding errors + */ + serializer(output_adapter_t s, const char ichar, + error_handler_t error_handler_ = error_handler_t::strict) + : o(std::move(s)) + , loc(std::localeconv()) + , thousands_sep(loc->thousands_sep == nullptr ? '\0' : * (loc->thousands_sep)) + , decimal_point(loc->decimal_point == nullptr ? '\0' : * (loc->decimal_point)) + , indent_char(ichar) + , indent_string(512, indent_char) + , error_handler(error_handler_) + {} + + // delete because of pointer members + serializer(const serializer&) = delete; + serializer& operator=(const serializer&) = delete; + serializer(serializer&&) = delete; + serializer& operator=(serializer&&) = delete; + ~serializer() = default; + + /*! + @brief internal implementation of the serialization function + + This function is called by the public member function dump and organizes + the serialization internally. The indentation level is propagated as + additional parameter. In case of arrays and objects, the function is + called recursively. 
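+
+ For illustration, dumping {"a": [1, 2]} with @a pretty_print enabled and
+ an @a indent_step of 2 is expected to produce
+
+     {
+       "a": [
+         1,
+         2
+       ]
+     }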
+ + - strings and object keys are escaped using `escape_string()` + - integer numbers are converted implicitly via `operator<<` + - floating-point numbers are converted to a string using `"%g"` format + + @param[in] val value to serialize + @param[in] pretty_print whether the output shall be pretty-printed + @param[in] indent_step the indent level + @param[in] current_indent the current indent level (only used internally) + */ + void dump(const BasicJsonType& val, const bool pretty_print, + const bool ensure_ascii, + const unsigned int indent_step, + const unsigned int current_indent = 0) + { + switch (val.m_type) + { + case value_t::object: + { + if (val.m_value.object->empty()) + { + o->write_characters("{}", 2); + return; + } + + if (pretty_print) + { + o->write_characters("{\n", 2); + + // variable to hold indentation for recursive calls + const auto new_indent = current_indent + indent_step; + if (JSON_UNLIKELY(indent_string.size() < new_indent)) + { + indent_string.resize(indent_string.size() * 2, ' '); + } + + // first n-1 elements + auto i = val.m_value.object->cbegin(); + for (std::size_t cnt = 0; cnt < val.m_value.object->size() - 1; ++cnt, ++i) + { + o->write_characters(indent_string.c_str(), new_indent); + o->write_character('\"'); + dump_escaped(i->first, ensure_ascii); + o->write_characters("\": ", 3); + dump(i->second, true, ensure_ascii, indent_step, new_indent); + o->write_characters(",\n", 2); + } + + // last element + assert(i != val.m_value.object->cend()); + assert(std::next(i) == val.m_value.object->cend()); + o->write_characters(indent_string.c_str(), new_indent); + o->write_character('\"'); + dump_escaped(i->first, ensure_ascii); + o->write_characters("\": ", 3); + dump(i->second, true, ensure_ascii, indent_step, new_indent); + + o->write_character('\n'); + o->write_characters(indent_string.c_str(), current_indent); + o->write_character('}'); + } + else + { + o->write_character('{'); + + // first n-1 elements + auto i = val.m_value.object->cbegin(); + for (std::size_t cnt = 0; cnt < val.m_value.object->size() - 1; ++cnt, ++i) + { + o->write_character('\"'); + dump_escaped(i->first, ensure_ascii); + o->write_characters("\":", 2); + dump(i->second, false, ensure_ascii, indent_step, current_indent); + o->write_character(','); + } + + // last element + assert(i != val.m_value.object->cend()); + assert(std::next(i) == val.m_value.object->cend()); + o->write_character('\"'); + dump_escaped(i->first, ensure_ascii); + o->write_characters("\":", 2); + dump(i->second, false, ensure_ascii, indent_step, current_indent); + + o->write_character('}'); + } + + return; + } + + case value_t::array: + { + if (val.m_value.array->empty()) + { + o->write_characters("[]", 2); + return; + } + + if (pretty_print) + { + o->write_characters("[\n", 2); + + // variable to hold indentation for recursive calls + const auto new_indent = current_indent + indent_step; + if (JSON_UNLIKELY(indent_string.size() < new_indent)) + { + indent_string.resize(indent_string.size() * 2, ' '); + } + + // first n-1 elements + for (auto i = val.m_value.array->cbegin(); + i != val.m_value.array->cend() - 1; ++i) + { + o->write_characters(indent_string.c_str(), new_indent); + dump(*i, true, ensure_ascii, indent_step, new_indent); + o->write_characters(",\n", 2); + } + + // last element + assert(not val.m_value.array->empty()); + o->write_characters(indent_string.c_str(), new_indent); + dump(val.m_value.array->back(), true, ensure_ascii, indent_step, new_indent); + + o->write_character('\n'); + 
o->write_characters(indent_string.c_str(), current_indent); + o->write_character(']'); + } + else + { + o->write_character('['); + + // first n-1 elements + for (auto i = val.m_value.array->cbegin(); + i != val.m_value.array->cend() - 1; ++i) + { + dump(*i, false, ensure_ascii, indent_step, current_indent); + o->write_character(','); + } + + // last element + assert(not val.m_value.array->empty()); + dump(val.m_value.array->back(), false, ensure_ascii, indent_step, current_indent); + + o->write_character(']'); + } + + return; + } + + case value_t::string: + { + o->write_character('\"'); + dump_escaped(*val.m_value.string, ensure_ascii); + o->write_character('\"'); + return; + } + + case value_t::boolean: + { + if (val.m_value.boolean) + { + o->write_characters("true", 4); + } + else + { + o->write_characters("false", 5); + } + return; + } + + case value_t::number_integer: + { + dump_integer(val.m_value.number_integer); + return; + } + + case value_t::number_unsigned: + { + dump_integer(val.m_value.number_unsigned); + return; + } + + case value_t::number_float: + { + dump_float(val.m_value.number_float); + return; + } + + case value_t::discarded: + { + o->write_characters("", 11); + return; + } + + case value_t::null: + { + o->write_characters("null", 4); + return; + } + + default: // LCOV_EXCL_LINE + assert(false); // LCOV_EXCL_LINE + } + } + + private: + /*! + @brief dump escaped string + + Escape a string by replacing certain special characters by a sequence of an + escape character (backslash) and another character and other control + characters by a sequence of "\u" followed by a four-digit hex + representation. The escaped string is written to output stream @a o. + + @param[in] s the string to escape + @param[in] ensure_ascii whether to escape non-ASCII characters with + \uXXXX sequences + + @complexity Linear in the length of string @a s. 
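+
+ For illustration: the control character U+000A is written as "\n"; with
+ @a ensure_ascii set, U+00E4 becomes "\u00e4" and the non-BMP code point
+ U+1F600 becomes the surrogate pair "\ud83d\ude00".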
+ */ + void dump_escaped(const string_t& s, const bool ensure_ascii) + { + std::uint32_t codepoint; + std::uint8_t state = UTF8_ACCEPT; + std::size_t bytes = 0; // number of bytes written to string_buffer + + // number of bytes written at the point of the last valid byte + std::size_t bytes_after_last_accept = 0; + std::size_t undumped_chars = 0; + + for (std::size_t i = 0; i < s.size(); ++i) + { + const auto byte = static_cast(s[i]); + + switch (decode(state, codepoint, byte)) + { + case UTF8_ACCEPT: // decode found a new code point + { + switch (codepoint) + { + case 0x08: // backspace + { + string_buffer[bytes++] = '\\'; + string_buffer[bytes++] = 'b'; + break; + } + + case 0x09: // horizontal tab + { + string_buffer[bytes++] = '\\'; + string_buffer[bytes++] = 't'; + break; + } + + case 0x0A: // newline + { + string_buffer[bytes++] = '\\'; + string_buffer[bytes++] = 'n'; + break; + } + + case 0x0C: // formfeed + { + string_buffer[bytes++] = '\\'; + string_buffer[bytes++] = 'f'; + break; + } + + case 0x0D: // carriage return + { + string_buffer[bytes++] = '\\'; + string_buffer[bytes++] = 'r'; + break; + } + + case 0x22: // quotation mark + { + string_buffer[bytes++] = '\\'; + string_buffer[bytes++] = '\"'; + break; + } + + case 0x5C: // reverse solidus + { + string_buffer[bytes++] = '\\'; + string_buffer[bytes++] = '\\'; + break; + } + + default: + { + // escape control characters (0x00..0x1F) or, if + // ensure_ascii parameter is used, non-ASCII characters + if ((codepoint <= 0x1F) or (ensure_ascii and (codepoint >= 0x7F))) + { + if (codepoint <= 0xFFFF) + { + (std::snprintf)(string_buffer.data() + bytes, 7, "\\u%04x", + static_cast(codepoint)); + bytes += 6; + } + else + { + (std::snprintf)(string_buffer.data() + bytes, 13, "\\u%04x\\u%04x", + static_cast(0xD7C0u + (codepoint >> 10u)), + static_cast(0xDC00u + (codepoint & 0x3FFu))); + bytes += 12; + } + } + else + { + // copy byte to buffer (all previous bytes + // been copied have in default case above) + string_buffer[bytes++] = s[i]; + } + break; + } + } + + // write buffer and reset index; there must be 13 bytes + // left, as this is the maximal number of bytes to be + // written ("\uxxxx\uxxxx\0") for one code point + if (string_buffer.size() - bytes < 13) + { + o->write_characters(string_buffer.data(), bytes); + bytes = 0; + } + + // remember the byte position of this accept + bytes_after_last_accept = bytes; + undumped_chars = 0; + break; + } + + case UTF8_REJECT: // decode found invalid UTF-8 byte + { + switch (error_handler) + { + case error_handler_t::strict: + { + std::string sn(3, '\0'); + (std::snprintf)(&sn[0], sn.size(), "%.2X", byte); + JSON_THROW(type_error::create(316, "invalid UTF-8 byte at index " + std::to_string(i) + ": 0x" + sn)); + } + + case error_handler_t::ignore: + case error_handler_t::replace: + { + // in case we saw this character the first time, we + // would like to read it again, because the byte + // may be OK for itself, but just not OK for the + // previous sequence + if (undumped_chars > 0) + { + --i; + } + + // reset length buffer to the last accepted index; + // thus removing/ignoring the invalid characters + bytes = bytes_after_last_accept; + + if (error_handler == error_handler_t::replace) + { + // add a replacement character + if (ensure_ascii) + { + string_buffer[bytes++] = '\\'; + string_buffer[bytes++] = 'u'; + string_buffer[bytes++] = 'f'; + string_buffer[bytes++] = 'f'; + string_buffer[bytes++] = 'f'; + string_buffer[bytes++] = 'd'; + } + else + { + string_buffer[bytes++] = 
detail::binary_writer::to_char_type('\xEF'); + string_buffer[bytes++] = detail::binary_writer::to_char_type('\xBF'); + string_buffer[bytes++] = detail::binary_writer::to_char_type('\xBD'); + } + + // write buffer and reset index; there must be 13 bytes + // left, as this is the maximal number of bytes to be + // written ("\uxxxx\uxxxx\0") for one code point + if (string_buffer.size() - bytes < 13) + { + o->write_characters(string_buffer.data(), bytes); + bytes = 0; + } + + bytes_after_last_accept = bytes; + } + + undumped_chars = 0; + + // continue processing the string + state = UTF8_ACCEPT; + break; + } + + default: // LCOV_EXCL_LINE + assert(false); // LCOV_EXCL_LINE + } + break; + } + + default: // decode found yet incomplete multi-byte code point + { + if (not ensure_ascii) + { + // code point will not be escaped - copy byte to buffer + string_buffer[bytes++] = s[i]; + } + ++undumped_chars; + break; + } + } + } + + // we finished processing the string + if (JSON_LIKELY(state == UTF8_ACCEPT)) + { + // write buffer + if (bytes > 0) + { + o->write_characters(string_buffer.data(), bytes); + } + } + else + { + // we finish reading, but do not accept: string was incomplete + switch (error_handler) + { + case error_handler_t::strict: + { + std::string sn(3, '\0'); + (std::snprintf)(&sn[0], sn.size(), "%.2X", static_cast(s.back())); + JSON_THROW(type_error::create(316, "incomplete UTF-8 string; last byte: 0x" + sn)); + } + + case error_handler_t::ignore: + { + // write all accepted bytes + o->write_characters(string_buffer.data(), bytes_after_last_accept); + break; + } + + case error_handler_t::replace: + { + // write all accepted bytes + o->write_characters(string_buffer.data(), bytes_after_last_accept); + // add a replacement character + if (ensure_ascii) + { + o->write_characters("\\ufffd", 6); + } + else + { + o->write_characters("\xEF\xBF\xBD", 3); + } + break; + } + + default: // LCOV_EXCL_LINE + assert(false); // LCOV_EXCL_LINE + } + } + } + + /*! + @brief count digits + + Count the number of decimal (base 10) digits for an input unsigned integer. + + @param[in] x unsigned integer number to count its digits + @return number of decimal digits + */ + inline unsigned int count_digits(number_unsigned_t x) noexcept + { + unsigned int n_digits = 1; + for (;;) + { + if (x < 10) + { + return n_digits; + } + if (x < 100) + { + return n_digits + 1; + } + if (x < 1000) + { + return n_digits + 2; + } + if (x < 10000) + { + return n_digits + 3; + } + x = x / 10000u; + n_digits += 4; + } + } + + /*! + @brief dump an integer + + Dump a given integer to output stream @a o. Works internally with + @a number_buffer. 
+ + @param[in] x integer number (signed or unsigned) to dump + @tparam NumberType either @a number_integer_t or @a number_unsigned_t + */ + template::value or + std::is_same::value, + int> = 0> + void dump_integer(NumberType x) + { + static constexpr std::array, 100> digits_to_99 + { + { + {{'0', '0'}}, {{'0', '1'}}, {{'0', '2'}}, {{'0', '3'}}, {{'0', '4'}}, {{'0', '5'}}, {{'0', '6'}}, {{'0', '7'}}, {{'0', '8'}}, {{'0', '9'}}, + {{'1', '0'}}, {{'1', '1'}}, {{'1', '2'}}, {{'1', '3'}}, {{'1', '4'}}, {{'1', '5'}}, {{'1', '6'}}, {{'1', '7'}}, {{'1', '8'}}, {{'1', '9'}}, + {{'2', '0'}}, {{'2', '1'}}, {{'2', '2'}}, {{'2', '3'}}, {{'2', '4'}}, {{'2', '5'}}, {{'2', '6'}}, {{'2', '7'}}, {{'2', '8'}}, {{'2', '9'}}, + {{'3', '0'}}, {{'3', '1'}}, {{'3', '2'}}, {{'3', '3'}}, {{'3', '4'}}, {{'3', '5'}}, {{'3', '6'}}, {{'3', '7'}}, {{'3', '8'}}, {{'3', '9'}}, + {{'4', '0'}}, {{'4', '1'}}, {{'4', '2'}}, {{'4', '3'}}, {{'4', '4'}}, {{'4', '5'}}, {{'4', '6'}}, {{'4', '7'}}, {{'4', '8'}}, {{'4', '9'}}, + {{'5', '0'}}, {{'5', '1'}}, {{'5', '2'}}, {{'5', '3'}}, {{'5', '4'}}, {{'5', '5'}}, {{'5', '6'}}, {{'5', '7'}}, {{'5', '8'}}, {{'5', '9'}}, + {{'6', '0'}}, {{'6', '1'}}, {{'6', '2'}}, {{'6', '3'}}, {{'6', '4'}}, {{'6', '5'}}, {{'6', '6'}}, {{'6', '7'}}, {{'6', '8'}}, {{'6', '9'}}, + {{'7', '0'}}, {{'7', '1'}}, {{'7', '2'}}, {{'7', '3'}}, {{'7', '4'}}, {{'7', '5'}}, {{'7', '6'}}, {{'7', '7'}}, {{'7', '8'}}, {{'7', '9'}}, + {{'8', '0'}}, {{'8', '1'}}, {{'8', '2'}}, {{'8', '3'}}, {{'8', '4'}}, {{'8', '5'}}, {{'8', '6'}}, {{'8', '7'}}, {{'8', '8'}}, {{'8', '9'}}, + {{'9', '0'}}, {{'9', '1'}}, {{'9', '2'}}, {{'9', '3'}}, {{'9', '4'}}, {{'9', '5'}}, {{'9', '6'}}, {{'9', '7'}}, {{'9', '8'}}, {{'9', '9'}}, + } + }; + + // special case for "0" + if (x == 0) + { + o->write_character('0'); + return; + } + + // use a pointer to fill the buffer + auto buffer_ptr = number_buffer.begin(); + + const bool is_negative = std::is_same::value and not(x >= 0); // see issue #755 + number_unsigned_t abs_value; + + unsigned int n_chars; + + if (is_negative) + { + *buffer_ptr = '-'; + abs_value = static_cast(std::abs(static_cast(x))); + + // account one more byte for the minus sign + n_chars = 1 + count_digits(abs_value); + } + else + { + abs_value = static_cast(x); + n_chars = count_digits(abs_value); + } + + // spare 1 byte for '\0' + assert(n_chars < number_buffer.size() - 1); + + // jump to the end to generate the string from backward + // so we later avoid reversing the result + buffer_ptr += n_chars; + + // Fast int2ascii implementation inspired by "Fastware" talk by Andrei Alexandrescu + // See: https://www.youtube.com/watch?v=o4-CwDo2zpg + while (abs_value >= 100) + { + const auto digits_index = static_cast((abs_value % 100)); + abs_value /= 100; + *(--buffer_ptr) = digits_to_99[digits_index][1]; + *(--buffer_ptr) = digits_to_99[digits_index][0]; + } + + if (abs_value >= 10) + { + const auto digits_index = static_cast(abs_value); + *(--buffer_ptr) = digits_to_99[digits_index][1]; + *(--buffer_ptr) = digits_to_99[digits_index][0]; + } + else + { + *(--buffer_ptr) = static_cast('0' + abs_value); + } + + o->write_characters(number_buffer.data(), n_chars); + } + + /*! + @brief dump a floating-point number + + Dump a given floating-point number to output stream @a o. Works internally + with @a number_buffer. 
+ + @param[in] x floating-point number to dump + */ + void dump_float(number_float_t x) + { + // NaN / inf + if (not std::isfinite(x)) + { + o->write_characters("null", 4); + return; + } + + // If number_float_t is an IEEE-754 single or double precision number, + // use the Grisu2 algorithm to produce short numbers which are + // guaranteed to round-trip, using strtof and strtod, resp. + // + // NB: The test below works if == . + static constexpr bool is_ieee_single_or_double + = (std::numeric_limits::is_iec559 and std::numeric_limits::digits == 24 and std::numeric_limits::max_exponent == 128) or + (std::numeric_limits::is_iec559 and std::numeric_limits::digits == 53 and std::numeric_limits::max_exponent == 1024); + + dump_float(x, std::integral_constant()); + } + + void dump_float(number_float_t x, std::true_type /*is_ieee_single_or_double*/) + { + char* begin = number_buffer.data(); + char* end = ::nlohmann::detail::to_chars(begin, begin + number_buffer.size(), x); + + o->write_characters(begin, static_cast(end - begin)); + } + + void dump_float(number_float_t x, std::false_type /*is_ieee_single_or_double*/) + { + // get number of digits for a float -> text -> float round-trip + static constexpr auto d = std::numeric_limits::max_digits10; + + // the actual conversion + std::ptrdiff_t len = (std::snprintf)(number_buffer.data(), number_buffer.size(), "%.*g", d, x); + + // negative value indicates an error + assert(len > 0); + // check if buffer was large enough + assert(static_cast(len) < number_buffer.size()); + + // erase thousands separator + if (thousands_sep != '\0') + { + const auto end = std::remove(number_buffer.begin(), + number_buffer.begin() + len, thousands_sep); + std::fill(end, number_buffer.end(), '\0'); + assert((end - number_buffer.begin()) <= len); + len = (end - number_buffer.begin()); + } + + // convert decimal point to '.' + if (decimal_point != '\0' and decimal_point != '.') + { + const auto dec_pos = std::find(number_buffer.begin(), number_buffer.end(), decimal_point); + if (dec_pos != number_buffer.end()) + { + *dec_pos = '.'; + } + } + + o->write_characters(number_buffer.data(), static_cast(len)); + + // determine if need to append ".0" + const bool value_is_int_like = + std::none_of(number_buffer.begin(), number_buffer.begin() + len + 1, + [](char c) + { + return c == '.' or c == 'e'; + }); + + if (value_is_int_like) + { + o->write_characters(".0", 2); + } + } + + /*! + @brief check whether a string is UTF-8 encoded + + The function checks each byte of a string whether it is UTF-8 encoded. The + result of the check is stored in the @a state parameter. The function must + be called initially with state 0 (accept). State 1 means the string must + be rejected, because the current byte is not allowed. If the string is + completely processed, but the state is non-zero, the string ended + prematurely; that is, the last byte indicated more bytes should have + followed. + + @param[in,out] state the state of the decoding + @param[in,out] codep codepoint (valid only if resulting state is UTF8_ACCEPT) + @param[in] byte next byte to decode + @return new state + + @note The function has been edited: a std::array is used. 
+ + @copyright Copyright (c) 2008-2009 Bjoern Hoehrmann + @sa http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ + */ + static std::uint8_t decode(std::uint8_t& state, std::uint32_t& codep, const std::uint8_t byte) noexcept + { + static const std::array utf8d = + { + { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 00..1F + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20..3F + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 40..5F + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 60..7F + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, // 80..9F + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // A0..BF + 8, 8, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // C0..DF + 0xA, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x3, 0x3, // E0..EF + 0xB, 0x6, 0x6, 0x6, 0x5, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, // F0..FF + 0x0, 0x1, 0x2, 0x3, 0x5, 0x8, 0x7, 0x1, 0x1, 0x1, 0x4, 0x6, 0x1, 0x1, 0x1, 0x1, // s0..s0 + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, // s1..s2 + 1, 2, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, // s3..s4 + 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, // s5..s6 + 1, 3, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 // s7..s8 + } + }; + + const std::uint8_t type = utf8d[byte]; + + codep = (state != UTF8_ACCEPT) + ? (byte & 0x3fu) | (codep << 6u) + : (0xFFu >> type) & (byte); + + state = utf8d[256u + state * 16u + type]; + return state; + } + + private: + /// the output of the serializer + output_adapter_t o = nullptr; + + /// a (hopefully) large enough character buffer + std::array number_buffer{{}}; + + /// the locale + const std::lconv* loc = nullptr; + /// the locale's thousand separator character + const char thousands_sep = '\0'; + /// the locale's decimal point character + const char decimal_point = '\0'; + + /// string buffer + std::array string_buffer{{}}; + + /// the indentation character + const char indent_char; + /// the indentation string + string_t indent_string; + + /// error_handler how to react on decoding errors + const error_handler_t error_handler; +}; +} // namespace detail } // namespace nlohmann +// #include + +// #include + /*! 
@brief namespace for Niels Lohmann @@ -12697,6 +13084,7 @@ class basic_json @since 2.1.0 */ + JSON_NODISCARD static basic_json meta() { basic_json result; @@ -13329,7 +13717,7 @@ class basic_json object = nullptr; // silence warning, see #821 if (JSON_UNLIKELY(t == value_t::null)) { - JSON_THROW(other_error::create(500, "961c151d2e87f2686a955a9be24d316f1362bf21 3.5.0")); // LCOV_EXCL_LINE + JSON_THROW(other_error::create(500, "961c151d2e87f2686a955a9be24d316f1362bf21 3.6.1")); // LCOV_EXCL_LINE } break; } @@ -13702,6 +14090,8 @@ class basic_json case value_t::discarded: m_type = value_t::discarded; break; + default: // LCOV_EXCL_LINE + assert(false); // LCOV_EXCL_LINE } assert_invariant(); } @@ -13789,7 +14179,7 @@ class basic_json bool is_an_object = std::all_of(init.begin(), init.end(), [](const detail::json_ref& element_ref) { - return (element_ref->is_array() and element_ref->size() == 2 and (*element_ref)[0].is_string()); + return element_ref->is_array() and element_ref->size() == 2 and (*element_ref)[0].is_string(); }); // adjust type if type deduction is not wanted @@ -13869,6 +14259,7 @@ class basic_json @since version 1.0.0 */ + JSON_NODISCARD static basic_json array(initializer_list_t init = {}) { return basic_json(init, false, value_t::array); @@ -13912,6 +14303,7 @@ class basic_json @since version 1.0.0 */ + JSON_NODISCARD static basic_json object(initializer_list_t init = {}) { return basic_json(init, false, value_t::object); @@ -14472,7 +14864,7 @@ class basic_json */ constexpr bool is_null() const noexcept { - return (m_type == value_t::null); + return m_type == value_t::null; } /*! @@ -14494,7 +14886,7 @@ class basic_json */ constexpr bool is_boolean() const noexcept { - return (m_type == value_t::boolean); + return m_type == value_t::boolean; } /*! @@ -14553,7 +14945,7 @@ class basic_json */ constexpr bool is_number_integer() const noexcept { - return (m_type == value_t::number_integer or m_type == value_t::number_unsigned); + return m_type == value_t::number_integer or m_type == value_t::number_unsigned; } /*! @@ -14581,7 +14973,7 @@ class basic_json */ constexpr bool is_number_unsigned() const noexcept { - return (m_type == value_t::number_unsigned); + return m_type == value_t::number_unsigned; } /*! @@ -14609,7 +15001,7 @@ class basic_json */ constexpr bool is_number_float() const noexcept { - return (m_type == value_t::number_float); + return m_type == value_t::number_float; } /*! @@ -14631,7 +15023,7 @@ class basic_json */ constexpr bool is_object() const noexcept { - return (m_type == value_t::object); + return m_type == value_t::object; } /*! @@ -14653,7 +15045,7 @@ class basic_json */ constexpr bool is_array() const noexcept { - return (m_type == value_t::array); + return m_type == value_t::array; } /*! @@ -14675,7 +15067,7 @@ class basic_json */ constexpr bool is_string() const noexcept { - return (m_type == value_t::string); + return m_type == value_t::string; } /*! @@ -14702,7 +15094,7 @@ class basic_json */ constexpr bool is_discarded() const noexcept { - return (m_type == value_t::discarded); + return m_type == value_t::discarded; } /*! 
@@ -15236,7 +15628,7 @@ class basic_json #ifndef _MSC_VER // fix for issue #167 operator<< ambiguity under VS2015 and not std::is_same>::value -#if defined(JSON_HAS_CPP_17) && defined(_MSC_VER) and _MSC_VER <= 1914 +#if defined(JSON_HAS_CPP_17) && (defined(__GNUC__) || (defined(_MSC_VER) and _MSC_VER <= 1914)) and not std::is_same::value #endif #endif @@ -16274,6 +16666,8 @@ class basic_json @liveexample{The example shows how `find()` is used.,find__key_type} + @sa @ref contains(KeyT&&) const -- checks whether a key exists + @since version 1.0.0 */ template @@ -16334,6 +16728,36 @@ class basic_json return is_object() ? m_value.object->count(std::forward(key)) : 0; } + /*! + @brief check the existence of an element in a JSON object + + Check whether an element exists in a JSON object with key equivalent to + @a key. If the element is not found or the JSON value is not an object, + false is returned. + + @note This method always returns false when executed on a JSON type + that is not an object. + + @param[in] key key value to check its existence. + + @return true if an element with specified @a key exists. If no such + element with such key is found or the JSON value is not an object, + false is returned. + + @complexity Logarithmic in the size of the JSON object. + + @liveexample{The following code shows an example for `contains()`.,contains} + + @sa @ref find(KeyT&&) -- returns an iterator to an object element + + @since version 3.6.0 + */ + template + bool contains(KeyT&& key) const + { + return is_object() and m_value.object->find(std::forward(key)) != m_value.object->end(); + } + /// @} @@ -17118,7 +17542,8 @@ class basic_json // add element to array (move semantics) m_value.array->push_back(std::move(val)); - // invalidate object + // invalidate object: mark it null so we do not call the destructor + // cppcheck-suppress accessMoved val.m_type = value_t::null; } @@ -17910,28 +18335,28 @@ class basic_json switch (lhs_type) { case value_t::array: - return (*lhs.m_value.array == *rhs.m_value.array); + return *lhs.m_value.array == *rhs.m_value.array; case value_t::object: - return (*lhs.m_value.object == *rhs.m_value.object); + return *lhs.m_value.object == *rhs.m_value.object; case value_t::null: return true; case value_t::string: - return (*lhs.m_value.string == *rhs.m_value.string); + return *lhs.m_value.string == *rhs.m_value.string; case value_t::boolean: - return (lhs.m_value.boolean == rhs.m_value.boolean); + return lhs.m_value.boolean == rhs.m_value.boolean; case value_t::number_integer: - return (lhs.m_value.number_integer == rhs.m_value.number_integer); + return lhs.m_value.number_integer == rhs.m_value.number_integer; case value_t::number_unsigned: - return (lhs.m_value.number_unsigned == rhs.m_value.number_unsigned); + return lhs.m_value.number_unsigned == rhs.m_value.number_unsigned; case value_t::number_float: - return (lhs.m_value.number_float == rhs.m_value.number_float); + return lhs.m_value.number_float == rhs.m_value.number_float; default: return false; @@ -17939,27 +18364,27 @@ class basic_json } else if (lhs_type == value_t::number_integer and rhs_type == value_t::number_float) { - return (static_cast(lhs.m_value.number_integer) == rhs.m_value.number_float); + return static_cast(lhs.m_value.number_integer) == rhs.m_value.number_float; } else if (lhs_type == value_t::number_float and rhs_type == value_t::number_integer) { - return (lhs.m_value.number_float == static_cast(rhs.m_value.number_integer)); + return lhs.m_value.number_float == 
static_cast(rhs.m_value.number_integer); } else if (lhs_type == value_t::number_unsigned and rhs_type == value_t::number_float) { - return (static_cast(lhs.m_value.number_unsigned) == rhs.m_value.number_float); + return static_cast(lhs.m_value.number_unsigned) == rhs.m_value.number_float; } else if (lhs_type == value_t::number_float and rhs_type == value_t::number_unsigned) { - return (lhs.m_value.number_float == static_cast(rhs.m_value.number_unsigned)); + return lhs.m_value.number_float == static_cast(rhs.m_value.number_unsigned); } else if (lhs_type == value_t::number_unsigned and rhs_type == value_t::number_integer) { - return (static_cast(lhs.m_value.number_unsigned) == rhs.m_value.number_integer); + return static_cast(lhs.m_value.number_unsigned) == rhs.m_value.number_integer; } else if (lhs_type == value_t::number_integer and rhs_type == value_t::number_unsigned) { - return (lhs.m_value.number_integer == static_cast(rhs.m_value.number_unsigned)); + return lhs.m_value.number_integer == static_cast(rhs.m_value.number_unsigned); } return false; @@ -17973,7 +18398,7 @@ class basic_json std::is_scalar::value, int>::type = 0> friend bool operator==(const_reference lhs, const ScalarType rhs) noexcept { - return (lhs == basic_json(rhs)); + return lhs == basic_json(rhs); } /*! @@ -17984,7 +18409,7 @@ class basic_json std::is_scalar::value, int>::type = 0> friend bool operator==(const ScalarType lhs, const_reference rhs) noexcept { - return (basic_json(lhs) == rhs); + return basic_json(lhs) == rhs; } /*! @@ -18018,7 +18443,7 @@ class basic_json std::is_scalar::value, int>::type = 0> friend bool operator!=(const_reference lhs, const ScalarType rhs) noexcept { - return (lhs != basic_json(rhs)); + return lhs != basic_json(rhs); } /*! @@ -18029,7 +18454,7 @@ class basic_json std::is_scalar::value, int>::type = 0> friend bool operator!=(const ScalarType lhs, const_reference rhs) noexcept { - return (basic_json(lhs) != rhs); + return basic_json(lhs) != rhs; } /*! @@ -18068,6 +18493,8 @@ class basic_json switch (lhs_type) { case value_t::array: + // note parentheses are necessary, see + // https://github.com/nlohmann/json/issues/1530 return (*lhs.m_value.array) < (*rhs.m_value.array); case value_t::object: @@ -18134,7 +18561,7 @@ class basic_json std::is_scalar::value, int>::type = 0> friend bool operator<(const_reference lhs, const ScalarType rhs) noexcept { - return (lhs < basic_json(rhs)); + return lhs < basic_json(rhs); } /*! @@ -18145,7 +18572,7 @@ class basic_json std::is_scalar::value, int>::type = 0> friend bool operator<(const ScalarType lhs, const_reference rhs) noexcept { - return (basic_json(lhs) < rhs); + return basic_json(lhs) < rhs; } /*! @@ -18180,7 +18607,7 @@ class basic_json std::is_scalar::value, int>::type = 0> friend bool operator<=(const_reference lhs, const ScalarType rhs) noexcept { - return (lhs <= basic_json(rhs)); + return lhs <= basic_json(rhs); } /*! @@ -18191,7 +18618,7 @@ class basic_json std::is_scalar::value, int>::type = 0> friend bool operator<=(const ScalarType lhs, const_reference rhs) noexcept { - return (basic_json(lhs) <= rhs); + return basic_json(lhs) <= rhs; } /*! @@ -18226,7 +18653,7 @@ class basic_json std::is_scalar::value, int>::type = 0> friend bool operator>(const_reference lhs, const ScalarType rhs) noexcept { - return (lhs > basic_json(rhs)); + return lhs > basic_json(rhs); } /*! 
@@ -18237,7 +18664,7 @@ class basic_json std::is_scalar::value, int>::type = 0> friend bool operator>(const ScalarType lhs, const_reference rhs) noexcept { - return (basic_json(lhs) > rhs); + return basic_json(lhs) > rhs; } /*! @@ -18272,7 +18699,7 @@ class basic_json std::is_scalar::value, int>::type = 0> friend bool operator>=(const_reference lhs, const ScalarType rhs) noexcept { - return (lhs >= basic_json(rhs)); + return lhs >= basic_json(rhs); } /*! @@ -18283,7 +18710,7 @@ class basic_json std::is_scalar::value, int>::type = 0> friend bool operator>=(const ScalarType lhs, const_reference rhs) noexcept { - return (basic_json(lhs) >= rhs); + return basic_json(lhs) >= rhs; } /// @} @@ -18329,8 +18756,8 @@ class basic_json friend std::ostream& operator<<(std::ostream& o, const basic_json& j) { // read width member and use it as indentation parameter if nonzero - const bool pretty_print = (o.width() > 0); - const auto indentation = (pretty_print ? o.width() : 0); + const bool pretty_print = o.width() > 0; + const auto indentation = pretty_print ? o.width() : 0; // reset width to 0 for subsequent calls to this stream o.width(0); @@ -18386,9 +18813,6 @@ class basic_json @pre The container storage is contiguous. Violating this precondition yields undefined behavior. **This precondition is enforced with an assertion.** - @pre Each element of the container has a size of 1 byte. Violating this - precondition yields undefined behavior. **This precondition is enforced - with a static assertion.** @warning There is no way to enforce all preconditions at compile-time. If the function is called with a noncompliant container and with @@ -18402,7 +18826,9 @@ class basic_json @param[in] allow_exceptions whether to throw exceptions in case of a parse error (optional, true by default) - @return result of the deserialization + @return deserialized JSON value; in case of a parse error and + @a allow_exceptions set to `false`, the return value will be + value_t::discarded. @throw parse_error.101 if a parse error occurs; example: `""unexpected end of input; expected string literal""` @@ -18429,6 +18855,7 @@ class basic_json @since version 2.0.3 (contiguous containers) */ + JSON_NODISCARD static basic_json parse(detail::input_adapter&& i, const parser_callback_t cb = nullptr, const bool allow_exceptions = true) @@ -18466,9 +18893,6 @@ class basic_json @pre The container storage is contiguous. Violating this precondition yields undefined behavior. **This precondition is enforced with an assertion.** - @pre Each element of the container has a size of 1 byte. Violating this - precondition yields undefined behavior. **This precondition is enforced - with a static assertion.** @warning There is no way to enforce all preconditions at compile-time. If the function is called with a noncompliant container and with @@ -18505,13 +18929,9 @@ class basic_json const bool strict = true) { assert(sax); - switch (format) - { - case input_format_t::json: - return parser(std::move(i)).sax_parse(sax, strict); - default: - return detail::binary_reader(std::move(i)).sax_parse(format, sax, strict); - } + return format == input_format_t::json + ? parser(std::move(i)).sax_parse(sax, strict) + : detail::binary_reader(std::move(i)).sax_parse(format, sax, strict); } /*! 
@@ -18544,7 +18964,9 @@ class basic_json @param[in] allow_exceptions whether to throw exceptions in case of a parse error (optional, true by default) - @return result of the deserialization + @return deserialized JSON value; in case of a parse error and + @a allow_exceptions set to `false`, the return value will be + value_t::discarded. @throw parse_error.101 in case of an unexpected token @throw parse_error.102 if to_unicode fails or surrogate error @@ -19175,7 +19597,9 @@ class basic_json @param[in] allow_exceptions whether to throw exceptions in case of a parse error (optional, true by default) - @return deserialized JSON value + @return deserialized JSON value; in case of a parse error and + @a allow_exceptions set to `false`, the return value will be + value_t::discarded. @throw parse_error.110 if the given input ends prematurely or the end of file was not reached when @a strict was set to true @@ -19200,6 +19624,7 @@ class basic_json @a strict parameter since 3.0.0; added @a allow_exceptions parameter since 3.2.0 */ + JSON_NODISCARD static basic_json from_cbor(detail::input_adapter&& i, const bool strict = true, const bool allow_exceptions = true) @@ -19215,6 +19640,7 @@ class basic_json */ template::value, int> = 0> + JSON_NODISCARD static basic_json from_cbor(A1 && a1, A2 && a2, const bool strict = true, const bool allow_exceptions = true) @@ -19278,7 +19704,9 @@ class basic_json @param[in] allow_exceptions whether to throw exceptions in case of a parse error (optional, true by default) - @return deserialized JSON value + @return deserialized JSON value; in case of a parse error and + @a allow_exceptions set to `false`, the return value will be + value_t::discarded. @throw parse_error.110 if the given input ends prematurely or the end of file was not reached when @a strict was set to true @@ -19305,6 +19733,7 @@ class basic_json @a strict parameter since 3.0.0; added @a allow_exceptions parameter since 3.2.0 */ + JSON_NODISCARD static basic_json from_msgpack(detail::input_adapter&& i, const bool strict = true, const bool allow_exceptions = true) @@ -19320,6 +19749,7 @@ class basic_json */ template::value, int> = 0> + JSON_NODISCARD static basic_json from_msgpack(A1 && a1, A2 && a2, const bool strict = true, const bool allow_exceptions = true) @@ -19365,7 +19795,9 @@ class basic_json @param[in] allow_exceptions whether to throw exceptions in case of a parse error (optional, true by default) - @return deserialized JSON value + @return deserialized JSON value; in case of a parse error and + @a allow_exceptions set to `false`, the return value will be + value_t::discarded. @throw parse_error.110 if the given input ends prematurely or the end of file was not reached when @a strict was set to true @@ -19389,6 +19821,7 @@ class basic_json @since version 3.1.0; added @a allow_exceptions parameter since 3.2.0 */ + JSON_NODISCARD static basic_json from_ubjson(detail::input_adapter&& i, const bool strict = true, const bool allow_exceptions = true) @@ -19404,6 +19837,7 @@ class basic_json */ template::value, int> = 0> + JSON_NODISCARD static basic_json from_ubjson(A1 && a1, A2 && a2, const bool strict = true, const bool allow_exceptions = true) @@ -19454,7 +19888,9 @@ class basic_json @param[in] allow_exceptions whether to throw exceptions in case of a parse error (optional, true by default) - @return deserialized JSON value + @return deserialized JSON value; in case of a parse error and + @a allow_exceptions set to `false`, the return value will be + value_t::discarded. 
@throw parse_error.114 if an unsupported BSON record type is encountered @@ -19472,6 +19908,7 @@ class basic_json @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the related UBJSON format */ + JSON_NODISCARD static basic_json from_bson(detail::input_adapter&& i, const bool strict = true, const bool allow_exceptions = true) @@ -19487,6 +19924,7 @@ class basic_json */ template::value, int> = 0> + JSON_NODISCARD static basic_json from_bson(A1 && a1, A2 && a2, const bool strict = true, const bool allow_exceptions = true) @@ -19822,63 +20260,59 @@ class basic_json const auto operation_add = [&result](json_pointer & ptr, basic_json val) { // adding to the root of the target document means replacing it - if (ptr.is_root()) + if (ptr.empty()) { result = val; + return; } - else + + // make sure the top element of the pointer exists + json_pointer top_pointer = ptr.top(); + if (top_pointer != ptr) { - // make sure the top element of the pointer exists - json_pointer top_pointer = ptr.top(); - if (top_pointer != ptr) + result.at(top_pointer); + } + + // get reference to parent of JSON pointer ptr + const auto last_path = ptr.back(); + ptr.pop_back(); + basic_json& parent = result[ptr]; + + switch (parent.m_type) + { + case value_t::null: + case value_t::object: { - result.at(top_pointer); + // use operator[] to add value + parent[last_path] = val; + break; } - // get reference to parent of JSON pointer ptr - const auto last_path = ptr.pop_back(); - basic_json& parent = result[ptr]; - - switch (parent.m_type) + case value_t::array: { - case value_t::null: - case value_t::object: + if (last_path == "-") { - // use operator[] to add value - parent[last_path] = val; - break; + // special case: append to back + parent.push_back(val); } - - case value_t::array: + else { - if (last_path == "-") + const auto idx = json_pointer::array_index(last_path); + if (JSON_UNLIKELY(static_cast(idx) > parent.size())) { - // special case: append to back - parent.push_back(val); + // avoid undefined behavior + JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range")); } - else - { - const auto idx = json_pointer::array_index(last_path); - if (JSON_UNLIKELY(static_cast(idx) > parent.size())) - { - // avoid undefined behavior - JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range")); - } - // default case: insert add offset - parent.insert(parent.begin() + static_cast(idx), val); - } - break; + // default case: insert add offset + parent.insert(parent.begin() + static_cast(idx), val); } - - // LCOV_EXCL_START - default: - { - // if there exists a parent it cannot be primitive - assert(false); - } - // LCOV_EXCL_STOP + break; } + + // if there exists a parent it cannot be primitive + default: // LCOV_EXCL_LINE + assert(false); // LCOV_EXCL_LINE } }; @@ -19886,7 +20320,8 @@ class basic_json const auto operation_remove = [&result](json_pointer & ptr) { // get reference to parent of JSON pointer ptr - const auto last_path = ptr.pop_back(); + const auto last_path = ptr.back(); + ptr.pop_back(); basic_json& parent = result.at(ptr); // remove child @@ -20033,7 +20468,7 @@ class basic_json break; } - case patch_operations::invalid: + default: { // op must be "add", "remove", "replace", "move", "copy", or // "test" @@ -20078,6 +20513,7 @@ class basic_json @since version 2.0.0 */ + JSON_NODISCARD static basic_json diff(const basic_json& source, const basic_json& target, const std::string& path = "") { @@ -20097,106 +20533,105 @@ 
class basic_json { {"op", "replace"}, {"path", path}, {"value", target} }); + return result; } - else + + switch (source.type()) { - switch (source.type()) + case value_t::array: { - case value_t::array: + // first pass: traverse common elements + std::size_t i = 0; + while (i < source.size() and i < target.size()) { - // first pass: traverse common elements - std::size_t i = 0; - while (i < source.size() and i < target.size()) - { - // recursive call to compare array values at index i - auto temp_diff = diff(source[i], target[i], path + "/" + std::to_string(i)); - result.insert(result.end(), temp_diff.begin(), temp_diff.end()); - ++i; - } - - // i now reached the end of at least one array - // in a second pass, traverse the remaining elements - - // remove my remaining elements - const auto end_index = static_cast(result.size()); - while (i < source.size()) - { - // add operations in reverse order to avoid invalid - // indices - result.insert(result.begin() + end_index, object( - { - {"op", "remove"}, - {"path", path + "/" + std::to_string(i)} - })); - ++i; - } - - // add other remaining elements - while (i < target.size()) - { - result.push_back( - { - {"op", "add"}, - {"path", path + "/" + std::to_string(i)}, - {"value", target[i]} - }); - ++i; - } - - break; + // recursive call to compare array values at index i + auto temp_diff = diff(source[i], target[i], path + "/" + std::to_string(i)); + result.insert(result.end(), temp_diff.begin(), temp_diff.end()); + ++i; } - case value_t::object: + // i now reached the end of at least one array + // in a second pass, traverse the remaining elements + + // remove my remaining elements + const auto end_index = static_cast(result.size()); + while (i < source.size()) { - // first pass: traverse this object's elements - for (auto it = source.cbegin(); it != source.cend(); ++it) + // add operations in reverse order to avoid invalid + // indices + result.insert(result.begin() + end_index, object( { - // escape the key name to be used in a JSON patch - const auto key = json_pointer::escape(it.key()); - - if (target.find(it.key()) != target.end()) - { - // recursive call to compare object values at key it - auto temp_diff = diff(it.value(), target[it.key()], path + "/" + key); - result.insert(result.end(), temp_diff.begin(), temp_diff.end()); - } - else - { - // found a key that is not in o -> remove it - result.push_back(object( - { - {"op", "remove"}, {"path", path + "/" + key} - })); - } - } - - // second pass: traverse other object's elements - for (auto it = target.cbegin(); it != target.cend(); ++it) - { - if (source.find(it.key()) == source.end()) - { - // found a key that is not in this -> add it - const auto key = json_pointer::escape(it.key()); - result.push_back( - { - {"op", "add"}, {"path", path + "/" + key}, - {"value", it.value()} - }); - } - } - - break; + {"op", "remove"}, + {"path", path + "/" + std::to_string(i)} + })); + ++i; } - default: + // add other remaining elements + while (i < target.size()) { - // both primitive type: replace value result.push_back( { - {"op", "replace"}, {"path", path}, {"value", target} + {"op", "add"}, + {"path", path + "/" + std::to_string(i)}, + {"value", target[i]} }); - break; + ++i; } + + break; + } + + case value_t::object: + { + // first pass: traverse this object's elements + for (auto it = source.cbegin(); it != source.cend(); ++it) + { + // escape the key name to be used in a JSON patch + const auto key = json_pointer::escape(it.key()); + + if (target.find(it.key()) != target.end()) + { + // 
recursive call to compare object values at key it + auto temp_diff = diff(it.value(), target[it.key()], path + "/" + key); + result.insert(result.end(), temp_diff.begin(), temp_diff.end()); + } + else + { + // found a key that is not in o -> remove it + result.push_back(object( + { + {"op", "remove"}, {"path", path + "/" + key} + })); + } + } + + // second pass: traverse other object's elements + for (auto it = target.cbegin(); it != target.cend(); ++it) + { + if (source.find(it.key()) == source.end()) + { + // found a key that is not in this -> add it + const auto key = json_pointer::escape(it.key()); + result.push_back( + { + {"op", "add"}, {"path", path + "/" + key}, + {"value", it.value()} + }); + } + } + + break; + } + + default: + { + // both primitive type: replace value + result.push_back( + { + {"op", "replace"}, {"path", path}, {"value", target} + }); + break; } } @@ -20397,10 +20832,11 @@ inline nlohmann::json::json_pointer operator "" _json_pointer(const char* s, std #undef JSON_LIKELY #undef JSON_UNLIKELY #undef JSON_DEPRECATED +#undef JSON_NODISCARD #undef JSON_HAS_CPP_14 #undef JSON_HAS_CPP_17 #undef NLOHMANN_BASIC_JSON_TPL_DECLARATION #undef NLOHMANN_BASIC_JSON_TPL -#endif +#endif // INCLUDE_NLOHMANN_JSON_HPP_ From fc46f8fc5ed34f789a73ef5fedf609bfb882ce53 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 19 Apr 2019 14:55:12 +0200 Subject: [PATCH 084/634] flake-registry: Use nixpkgs 19.03 --- flake-registry.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flake-registry.json b/flake-registry.json index 422f77675..ae94f1df2 100644 --- a/flake-registry.json +++ b/flake-registry.json @@ -7,7 +7,7 @@ "uri": "github:tweag/nix/flakes" }, "nixpkgs": { - "uri": "github:edolstra/nixpkgs/flake" + "uri": "github:edolstra/nixpkgs/release-19.03" } }, "version": 1 From 7ec7bad2f8174c67ced4a0a3e88f61c1f88affd0 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 19 Apr 2019 15:52:40 +0200 Subject: [PATCH 085/634] Update flake.lock --- flake.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flake.lock b/flake.lock index 3154006c8..9848cb996 100644 --- a/flake.lock +++ b/flake.lock @@ -2,7 +2,7 @@ "nonFlakeRequires": {}, "requires": { "nixpkgs": { - "uri": "github:edolstra/nixpkgs/f10e8a02eb7fa2b4a070f30cf87f4efcc7f3186d" + "uri": "github:edolstra/nixpkgs/a4d896e89932e873c4117908d558db6210fa3b56" } }, "version": 1 From cc51e37ad09a1dac0a58c543ffe275ddc18819aa Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 19 Apr 2019 16:07:37 +0200 Subject: [PATCH 086/634] Use "." as the default installable This makes e.g. "nix build" do something more or less reasonable (namely, build the default package of the flake in the current directory). 
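As a concrete illustration of the opt-out hook: a command that should not fall back to "." (such as the hypothetical "nix install" mentioned in the FIXME in installables.cc below) can override useDefaultInstallables(), just as StorePathsCommand does for --all in this change. The sketch below is purely illustrative and not part of this patch; the class name, command name, registration call and include are assumptions.

    // Hypothetical sketch, not added by this patch. Assumes the command
    // machinery from src/nix/command.hh as it exists in this tree.
    #include "command.hh"

    using namespace nix;

    struct CmdNeedsExplicitInstallable : InstallablesCommand
    {
        std::string name() override { return "needs-explicit"; }

        std::string description() override
        {
            return "example command that refuses to default to '.'";
        }

        // prepare() only injects "." when this returns true, so returning
        // false forces the user to name an installable explicitly.
        bool useDefaultInstallables() override { return false; }

        void run(ref<Store> store) override
        {
            // operate on 'installables' here
        }
    };

    // Registration follows the existing RegisterCommand pattern.
    static RegisterCommand r(make_ref<CmdNeedsExplicitInstallable>());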
--- src/nix/build.cc | 1 - src/nix/command.hh | 8 ++++++-- src/nix/installables.cc | 4 ++++ 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/src/nix/build.cc b/src/nix/build.cc index d6a6a8071..07b6b2f02 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -3,7 +3,6 @@ #include "common-args.hh" #include "shared.hh" #include "store-api.hh" -#include "primops/flake.hh" using namespace nix; diff --git a/src/nix/command.hh b/src/nix/command.hh index a52fbb9ba..6d43261ac 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -108,6 +108,8 @@ struct InstallablesCommand : virtual Args, SourceExprCommand void prepare() override; + virtual bool useDefaultInstallables() { return true; } + private: std::vector _installables; @@ -119,14 +121,14 @@ struct InstallableCommand : virtual Args, SourceExprCommand InstallableCommand() { - expectArg("installable", &_installable); + expectArg("installable", &_installable, true); } void prepare() override; private: - std::string _installable; + std::string _installable{"."}; }; /* A command that operates on zero or more store paths. */ @@ -146,6 +148,8 @@ public: virtual void run(ref store, Paths storePaths) = 0; void run(ref store) override; + + bool useDefaultInstallables() override { return !all; } }; /* A command that operates on exactly one store path. */ diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 9d87c70c3..c3ca87aa7 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -334,6 +334,10 @@ PathSet toDerivations(ref store, void InstallablesCommand::prepare() { + if (_installables.empty() && !file && useDefaultInstallables()) + // FIXME: commands like "nix install" should not have a + // default, probably. + _installables.push_back("."); installables = parseInstallables(getStore(), _installables); } From e9c42c06ef9a1f7af7c34162cc91a0dbeb746202 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Thu, 25 Apr 2019 10:49:56 +0200 Subject: [PATCH 087/634] Fixed lookupFlake bug --- src/libexpr/primops/flake.cc | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index d898da1e9..f6744a1f7 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -146,15 +146,14 @@ std::shared_ptr getFlagRegistry() return std::make_shared(); } -// This always returns a vector with globalReg, userReg, localReg, flakeReg. +// This always returns a vector with flakeRef, userReg, globalReg. // If one of them doesn't exist, the registry is left empty but does exist. const Registries EvalState::getFlakeRegistries() { Registries registries; - registries.push_back(getGlobalRegistry()); // TODO (Nick): Doesn't this break immutability? 
- registries.push_back(getUserRegistry()); - registries.push_back(std::make_shared()); // local registries.push_back(getFlagRegistry()); + registries.push_back(getUserRegistry()); + registries.push_back(getGlobalRegistry()); return registries; } @@ -169,8 +168,10 @@ static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const if (i != registry->entries.end()) { auto newRef = i->second; if (std::get_if(&flakeRef.data)) { - if (flakeRef.ref) newRef.ref = flakeRef.ref; - if (flakeRef.rev) newRef.rev = flakeRef.rev; + if (flakeRef.ref || flakeRef.rev) { + newRef.ref = flakeRef.ref; + newRef.rev = flakeRef.rev; + } } std::string errorMsg = "found cycle in flake registries: "; for (FlakeRef oldRef : pastSearches) { From d867e1804a39174f21d50095d9e6bc4a87190e16 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Wed, 17 Apr 2019 14:03:04 +0200 Subject: [PATCH 088/634] Fix printing FlakeList --- src/nix/flake.cc | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 6459df019..a25493f79 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -30,9 +30,14 @@ struct CmdFlakeList : StoreCommand, MixEvalArgs stopProgressBar(); - for (auto & registry : registries) - for (auto & entry : registry->entries) - std::cout << entry.first << " " << entry.second << "\n"; + for (auto & entry : registries[0]->entries) + std::cout << entry.first.to_string() << " flags " << entry.second.to_string() << "\n"; + + for (auto & entry : registries[1]->entries) + std::cout << entry.first.to_string() << " user " << entry.second.to_string() << "\n"; + + for (auto & entry : registries[2]->entries) + std::cout << entry.first.to_string() << " global " << entry.second.to_string() << "\n"; } }; @@ -146,7 +151,7 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON, MixEvalArgs, StoreCommand void run(nix::ref store) override { auto evalState = std::make_shared(searchPath, store); - nix::Flake flake = nix::getFlake(*evalState, FlakeRef(flakeUri), true); + Flake flake = getFlake(*evalState, FlakeRef(flakeUri), true); printFlakeInfo(flake, json); } }; From 35d1c95f7f6fc744a829bb25b75c15cc2da31e99 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Tue, 30 Apr 2019 11:03:31 +0200 Subject: [PATCH 089/634] Fix flag registry order --- src/libexpr/primops/flake.cc | 2 +- src/libexpr/primops/flake.hh | 4 ++++ src/nix/flake.cc | 6 +++--- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index f6744a1f7..84e878f75 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -146,7 +146,7 @@ std::shared_ptr getFlagRegistry() return std::make_shared(); } -// This always returns a vector with flakeRef, userReg, globalReg. +// This always returns a vector with flakeReg, userReg, globalReg. // If one of them doesn't exist, the registry is left empty but does exist. 
const Registries EvalState::getFlakeRegistries() { diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 4e0d3b646..8e9af5843 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -5,6 +5,10 @@ namespace nix { +static const size_t FLAG_REGISTRY = 0; +static const size_t USER_REGISTRY = 1; +static const size_t GLOBAL_REGISTRY = 2; + struct Value; class EvalState; diff --git a/src/nix/flake.cc b/src/nix/flake.cc index a25493f79..a80338fd3 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -30,13 +30,13 @@ struct CmdFlakeList : StoreCommand, MixEvalArgs stopProgressBar(); - for (auto & entry : registries[0]->entries) + for (auto & entry : registries[FLAG_REGISTRY]->entries) std::cout << entry.first.to_string() << " flags " << entry.second.to_string() << "\n"; - for (auto & entry : registries[1]->entries) + for (auto & entry : registries[USER_REGISTRY]->entries) std::cout << entry.first.to_string() << " user " << entry.second.to_string() << "\n"; - for (auto & entry : registries[2]->entries) + for (auto & entry : registries[GLOBAL_REGISTRY]->entries) std::cout << entry.first.to_string() << " global " << entry.second.to_string() << "\n"; } }; From 24b35bf9e7219feba9399466c41801f0ace3ef74 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Thu, 21 Mar 2019 09:30:16 +0100 Subject: [PATCH 090/634] Fixed issue #13 --- src/libexpr/common-eval-args.cc | 9 +++++++++ src/libexpr/common-eval-args.hh | 2 ++ src/libexpr/eval.hh | 7 +++++++ src/libexpr/primops/flake.cc | 22 +++++++++++----------- src/nix/build.cc | 2 +- src/nix/flake.cc | 1 + 6 files changed, 31 insertions(+), 12 deletions(-) diff --git a/src/libexpr/common-eval-args.cc b/src/libexpr/common-eval-args.cc index 3e5400052..8e94d358e 100644 --- a/src/libexpr/common-eval-args.cc +++ b/src/libexpr/common-eval-args.cc @@ -33,6 +33,15 @@ MixEvalArgs::MixEvalArgs() .handler([&](std::vector ss) { evalSettings.pureEval = false; }); + + mkFlag() + .longName("override-flake") + .labels({"original-ref", "resolved-ref"}) + .description("override a flake registry value") + .arity(2) + .handler([&](std::vector ss) { + registryOverrides.push_back(std::make_pair(ss[0], ss[1])); + }); } Bindings * MixEvalArgs::getAutoArgs(EvalState & state) diff --git a/src/libexpr/common-eval-args.hh b/src/libexpr/common-eval-args.hh index be7fda783..54fb731de 100644 --- a/src/libexpr/common-eval-args.hh +++ b/src/libexpr/common-eval-args.hh @@ -16,6 +16,8 @@ struct MixEvalArgs : virtual Args Strings searchPath; + std::vector> registryOverrides; + private: std::map autoArgs; diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index 3f537da89..44988cd70 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -63,6 +63,8 @@ typedef std::list SearchPath; /* Initialise the Boehm GC, if applicable. */ void initGC(); +typedef std::vector> RegistryOverrides; + class EvalState { @@ -89,6 +91,9 @@ public: const ref store; + RegistryOverrides registryOverrides; + + private: SrcToStore srcToStore; @@ -211,6 +216,8 @@ public: path. Nothing is copied to the store. 
*/ Path coerceToPath(const Pos & pos, Value & v, PathSet & context); + void addRegistryOverrides(RegistryOverrides overrides) { registryOverrides = overrides; } + public: /* The base environment, containing the builtin functions and diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index f6744a1f7..5af0c49c7 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -140,10 +140,13 @@ std::shared_ptr getUserRegistry() return readRegistry(getUserRegistryPath()); } -std::shared_ptr getFlagRegistry() +std::shared_ptr getFlagRegistry(RegistryOverrides registryOverrides) { - // TODO (Nick): Implement this. - return std::make_shared(); + auto flagRegistry = std::make_shared(); + for (auto const & x : registryOverrides) { + flagRegistry->entries.insert_or_assign(FlakeRef(x.first), FlakeRef(x.second)); + } + return flagRegistry; } // This always returns a vector with flakeRef, userReg, globalReg. @@ -151,7 +154,7 @@ std::shared_ptr getFlagRegistry() const Registries EvalState::getFlakeRegistries() { Registries registries; - registries.push_back(getFlagRegistry()); + registries.push_back(getFlagRegistry(registryOverrides)); registries.push_back(getUserRegistry()); registries.push_back(getGlobalRegistry()); return registries; @@ -357,9 +360,8 @@ NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeAlias al ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, RegistryAccess registryAccess, bool isTopFlake) { - Flake flake = getFlake(state, topRef, - registryAccess == AllowRegistry || (registryAccess == AllowRegistryAtTop && isTopFlake)); - + bool allowRegistries = registryAccess == AllowRegistry || (registryAccess == AllowRegistryAtTop && isTopFlake); + Flake flake = getFlake(state, topRef, allowRegistries); LockFile lockFile; if (isTopFlake) @@ -405,10 +407,8 @@ static LockFile makeLockFile(EvalState & evalState, FlakeRef & flakeRef) void updateLockFile(EvalState & state, const Path & path) { - // FIXME: don't copy 'path' to the store (especially since we - // dirty it immediately afterwards). - - FlakeRef flakeRef = FlakeRef(path); // FIXME: ugly + // FIXME: We are writing the lockfile to the store here! Very bad practice! + FlakeRef flakeRef = FlakeRef(path); auto lockFile = makeLockFile(state, flakeRef); writeLockFile(lockFile, path + "/flake.lock"); diff --git a/src/nix/build.cc b/src/nix/build.cc index 07b6b2f02..c08ec0e62 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -54,7 +54,7 @@ struct CmdBuild : MixDryRun, InstallablesCommand auto buildables = build(store, dryRun ? 
DryRun : Build, installables); auto evalState = std::make_shared(searchPath, store); - + evalState->addRegistryOverrides(registryOverrides); if (dryRun) return; for (size_t i = 0; i < buildables.size(); ++i) { diff --git a/src/nix/flake.cc b/src/nix/flake.cc index a25493f79..00c1c384c 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -92,6 +92,7 @@ struct CmdFlakeDeps : FlakeCommand, MixJSON, StoreCommand, MixEvalArgs void run(nix::ref store) override { auto evalState = std::make_shared(searchPath, store); + evalState->addRegistryOverrides(registryOverrides); FlakeRef flakeRef(flakeUri); From eba85e23670c57448aaf29d2fedec356e4cd2fe7 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Wed, 1 May 2019 10:34:23 +0200 Subject: [PATCH 091/634] WIP: still need to adapt flakeref parsing --- src/libexpr/primops/flake.cc | 22 ++++++++++++---------- src/libexpr/primops/flakeref.cc | 1 + src/libexpr/primops/flakeref.hh | 11 ++++++----- 3 files changed, 19 insertions(+), 15 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 88a0293e3..e806ef6c7 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -194,11 +194,8 @@ static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const return flakeRef; } -static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef flakeRef, bool impureIsAllowed = false) +static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef fRef, bool impureIsAllowed = false) { - FlakeRef fRef = lookupFlake(state, flakeRef, - impureIsAllowed ? state.getFlakeRegistries() : std::vector>()); - if (evalSettings.pureEval && !impureIsAllowed && !fRef.isImmutable()) throw Error("requested to fetch mutable flake '%s' in pure mode", fRef); @@ -266,26 +263,31 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef flakeRef, bo // This will return the flake which corresponds to a given FlakeRef. The lookupFlake is done within this function. Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowed = false) { - FlakeSourceInfo sourceInfo = fetchFlake(state, flakeRef, impureIsAllowed); + FlakeRef resolvedRef = lookupFlake(state, flakeRef, + impureIsAllowed ? state.getFlakeRegistries() : std::vector>()); + + FlakeSourceInfo sourceInfo = fetchFlake(state, resolvedRef, impureIsAllowed); debug("got flake source '%s' with revision %s", sourceInfo.storePath, sourceInfo.rev.value_or(Hash(htSHA1)).to_string(Base16, false)); + resolvedRef = sourceInfo.flakeRef; // `resolvedRef` is now immutable + state.store->assertStorePath(sourceInfo.storePath); if (state.allowedPaths) state.allowedPaths->insert(sourceInfo.storePath); - Flake flake(flakeRef, std::move(sourceInfo)); - if (std::get_if(&flakeRef.data)) { + Flake flake(resolvedRef, std::move(sourceInfo)); + if (std::get_if(&resolvedRef.data)) { // FIXME: ehm? 
if (flake.sourceInfo.rev) - flake.ref = FlakeRef(flakeRef.baseRef().to_string() + flake.ref = FlakeRef(resolvedRef.baseRef().to_string() + "/" + flake.sourceInfo.rev->to_string(Base16, false)); } - Path flakeFile = sourceInfo.storePath + "/flake.nix"; + Path flakeFile = sourceInfo.storePath + resolvedRef.subdir + "/flake.nix"; if (!pathExists(flakeFile)) - throw Error("source tree referenced by '%s' does not contain a 'flake.nix' file", flakeRef); + throw Error("source tree referenced by '%s' does not contain a 'flake.nix' file", resolvedRef); Value vInfo; state.evalFile(flakeFile, vInfo); // FIXME: symlink attack diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index b91bbee2a..022535515 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -139,6 +139,7 @@ std::string FlakeRef::to_string() const string += (ref ? "/" + *ref : "") + (rev ? "/" + rev->to_string(Base16, false) : ""); + if (subdir != "") string += "?dir=" + subdir; return string; } diff --git a/src/libexpr/primops/flakeref.hh b/src/libexpr/primops/flakeref.hh index 51fdc3b70..299094634 100644 --- a/src/libexpr/primops/flakeref.hh +++ b/src/libexpr/primops/flakeref.hh @@ -69,7 +69,7 @@ namespace nix { https://example.org/my/repo.git https://example.org/my/repo.git?ref=release-1.2.3 https://example.org/my/repo.git?rev=e72daba8250068216d79d2aeef40d4d95aff6666 - git://github.com/edolstra/dwarffs.git\?ref=flake\&rev=2efca4bc9da70fb001b26c3dc858c6397d3c4817 + git://github.com/edolstra/dwarffs.git?ref=flake&rev=2efca4bc9da70fb001b26c3dc858c6397d3c4817 * /path.git(\?attr(&attr)*)? @@ -144,17 +144,18 @@ struct FlakeRef std::optional ref; std::optional rev; + Path subdir = ""; // This is a relative path pointing at the flake.nix file's directory, relative to the git root. bool operator<(const FlakeRef & flakeRef) const { - return std::make_tuple(this->data, ref, rev) < - std::make_tuple(flakeRef.data, flakeRef.ref, flakeRef.rev); + return std::make_tuple(data, ref, rev, subdir) < + std::make_tuple(flakeRef.data, flakeRef.ref, flakeRef.rev, subdir); } bool operator==(const FlakeRef & flakeRef) const { - return std::make_tuple(this->data, ref, rev) == - std::make_tuple(flakeRef.data, flakeRef.ref, flakeRef.rev); + return std::make_tuple(data, ref, rev, subdir) == + std::make_tuple(flakeRef.data, flakeRef.ref, flakeRef.rev, flakeRef.subdir); } // Parse a flake URI. 
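For reference, a short sketch of how the subdir field added above is intended to be used. It relies only on what this patch introduces (the public subdir member, its use in to_string() and in the comparison operators); the example URI, the include path and the main() wrapper are assumptions for illustration, and parsing of a "dir" query parameter is only wired up in a later patch in this series.

    // Illustrative sketch only; not part of this patch.
    #include <cassert>
    #include <iostream>
    #include "primops/flakeref.hh"   // include path assumed

    int main()
    {
        nix::FlakeRef flakeRef("https://example.org/my/repo.git");

        // 'subdir' names the directory containing flake.nix, relative to
        // the root of the repository.
        flakeRef.subdir = "nix";

        // to_string() re-emits it as a 'dir' query parameter, roughly:
        //   https://example.org/my/repo.git?dir=nix
        std::cout << flakeRef.to_string() << std::endl;

        // subdir takes part in equality, so two refs that differ only in
        // 'dir' are treated as distinct flakes.
        nix::FlakeRef other("https://example.org/my/repo.git");
        other.subdir = "other";
        assert(!(flakeRef == other));

        return 0;
    }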
From 9b3069a88ceff3646d254fa403f41be889e6423d Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Tue, 30 Apr 2019 12:47:15 +0200 Subject: [PATCH 092/634] Fuzzymatching Fixed issue #61 --- src/libexpr/primops/flake.cc | 42 ++++++++++++++++++++++-------------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 88a0293e3..cc93eee36 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -161,7 +161,23 @@ const Registries EvalState::getFlakeRegistries() } static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const Registries & registries, - std::vector pastSearches = {}) + std::vector pastSearches = {}); + +FlakeRef updateFlakeRef(EvalState & state, const FlakeRef & newRef, const Registries & registries, std::vector pastSearches) +{ + std::string errorMsg = "found cycle in flake registries: "; + for (FlakeRef oldRef : pastSearches) { + errorMsg += oldRef.to_string(); + if (oldRef == newRef) + throw Error(errorMsg); + errorMsg += " - "; + } + pastSearches.push_back(newRef); + return lookupFlake(state, newRef, registries, pastSearches); +} + +static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const Registries & registries, + std::vector pastSearches) { if (registries.empty() && !flakeRef.isDirect()) throw Error("indirect flake reference '%s' is not allowed", flakeRef); @@ -170,21 +186,15 @@ static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const auto i = registry->entries.find(flakeRef); if (i != registry->entries.end()) { auto newRef = i->second; - if (std::get_if(&flakeRef.data)) { - if (flakeRef.ref || flakeRef.rev) { - newRef.ref = flakeRef.ref; - newRef.rev = flakeRef.rev; - } - } - std::string errorMsg = "found cycle in flake registries: "; - for (FlakeRef oldRef : pastSearches) { - errorMsg += oldRef.to_string(); - if (oldRef == newRef) - throw Error(errorMsg); - errorMsg += " - "; - } - pastSearches.push_back(newRef); - return lookupFlake(state, newRef, registries, pastSearches); + return updateFlakeRef(state, newRef, registries, pastSearches); + } + + auto j = registry->entries.find(flakeRef.baseRef()); + if (j != registry->entries.end()) { + auto newRef = j->second; + newRef.ref = flakeRef.ref; + newRef.rev = flakeRef.rev; + return updateFlakeRef(state, newRef, registries, pastSearches); } } From 00db8d4549b1e87db45395e157dec3f729bdc071 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 1 May 2019 14:24:22 +0200 Subject: [PATCH 093/634] Support 'dir' parameters in https and ssh flake URIs --- src/libexpr/primops/flakeref.cc | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index 022535515..66eab4db6 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -30,7 +30,9 @@ const static std::string schemeRegex = "(?:http|https|ssh|git|file)"; const static std::string authorityRegex = "[a-zA-Z0-9._~-]*"; const static std::string segmentRegex = "[a-zA-Z0-9._~-]+"; const static std::string pathRegex = "/?" + segmentRegex + "(?:/" + segmentRegex + ")*"; -const static std::string paramRegex = "[a-z]+=[a-zA-Z0-9._-]*"; +// FIXME: support escaping in query string. +// Note: '/' is not a valid query parameter, but so what... 
+const static std::string paramRegex = "[a-z]+=[/a-zA-Z0-9._-]*"; FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) { @@ -97,6 +99,9 @@ FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) if (!std::regex_match(value, refRegex2)) throw Error("invalid Git ref '%s'", value); ref = value; + } else if (name == "dir") { + // FIXME: validate value; should not contain relative paths + subdir = value; } else // FIXME: should probably pass through unknown parameters throw Error("invalid Git flake reference parameter '%s', in '%s'", name, uri); From 43408d3cd6e254c1c69eb9cf9de4de042b986ab6 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Wed, 1 May 2019 16:24:33 +0200 Subject: [PATCH 094/634] flake.lock now uses flakeRef.subdir --- src/libexpr/primops/flake.cc | 21 ++++++++++++--------- src/libexpr/primops/flake.hh | 2 +- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index e806ef6c7..41b3f6d28 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -285,7 +285,7 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe + "/" + flake.sourceInfo.rev->to_string(Base16, false)); } - Path flakeFile = sourceInfo.storePath + resolvedRef.subdir + "/flake.nix"; + Path flakeFile = sourceInfo.storePath + "/" + resolvedRef.subdir + "/flake.nix"; if (!pathExists(flakeFile)) throw Error("source tree referenced by '%s' does not contain a 'flake.nix' file", resolvedRef); @@ -367,7 +367,7 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, LockFile lockFile; if (isTopFlake) - lockFile = readLockFile(flake.sourceInfo.storePath + "/flake.lock"); // FIXME: symlink attack + lockFile = readLockFile(flake.sourceInfo.storePath + "/" + flake.resolvedRef.subdir + "/flake.lock"); // FIXME: symlink attack ResolvedFlake deps(flake); @@ -407,16 +407,19 @@ static LockFile makeLockFile(EvalState & evalState, FlakeRef & flakeRef) return lockFile; } -void updateLockFile(EvalState & state, const Path & path) +void updateLockFile(EvalState & state, const FlakeUri & flakeUri) { // FIXME: We are writing the lockfile to the store here! Very bad practice! - FlakeRef flakeRef = FlakeRef(path); - auto lockFile = makeLockFile(state, flakeRef); - writeLockFile(lockFile, path + "/flake.lock"); + FlakeRef flakeRef = FlakeRef(flakeUri); + if (auto refData = std::get_if(flakeRef)) { + auto lockFile = makeLockFile(state, flakeRef); + writeLockFile(lockFile, refData->path + "/" + flakeRef.subdir + "/flake.lock"); - // Hack: Make sure that flake.lock is visible to Git. Otherwise, - // exportGit will fail to copy it to the Nix store. - runProgram("git", true, { "-C", path, "add", "flake.lock" }); + // Hack: Make sure that flake.lock is visible to Git. Otherwise, + // exportGit will fail to copy it to the Nix store. 
+ runProgram("git", true, { "-C", refData->path, "add", flakeRef.subDir + "/flake.lock" }); + } else + throw Error("flakeUri %s can't be updated because it is not a path", flakeUri); } void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 8e9af5843..6329c36ec 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -90,7 +90,7 @@ struct ResolvedFlake ResolvedFlake resolveFlake(EvalState &, const FlakeRef &, RegistryAccess registryAccess, bool isTopFlake = true); -void updateLockFile(EvalState &, const Path & path); +void updateLockFile(EvalState &, const FlakeUri &); void gitCloneFlake (std::string flakeUri, EvalState &, Registries, Path); } From ab9e47284a46f1a933d84c9c66f8fb4ba6c4ba34 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 1 May 2019 18:07:36 +0200 Subject: [PATCH 095/634] Improve error message --- src/libexpr/primops/flake.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 41b3f6d28..293e5ad0b 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -287,7 +287,7 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe Path flakeFile = sourceInfo.storePath + "/" + resolvedRef.subdir + "/flake.nix"; if (!pathExists(flakeFile)) - throw Error("source tree referenced by '%s' does not contain a 'flake.nix' file", resolvedRef); + throw Error("source tree referenced by '%s' does not contain a '%s/flake.nix' file", resolvedRef, resolvedRef.subdir); Value vInfo; state.evalFile(flakeFile, vInfo); // FIXME: symlink attack From fa88f7152070d4c886b512de00691da709bc7229 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 1 May 2019 20:23:39 +0200 Subject: [PATCH 096/634] Validate 'dir=' parameters We reject any path element starting with a '.' (mostly to reject '.' and '..'). --- src/libexpr/primops/flakeref.cc | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index 66eab4db6..56ba58d09 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -34,6 +34,11 @@ const static std::string pathRegex = "/?" + segmentRegex + "(?:/" + segmentRegex // Note: '/' is not a valid query parameter, but so what... const static std::string paramRegex = "[a-z]+=[/a-zA-Z0-9._-]*"; +// 'dir' path elements cannot start with a '.'. We also reject +// potentially dangerous characters like ';'. +const static std::string subDirElemRegex = "(?:[a-zA-Z0-9_-]+[a-zA-Z0-9._-]*)"; +const static std::string subDirRegex = subDirElemRegex + "(?:/" + subDirElemRegex + ")*"; + FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) { // FIXME: could combine this into one regex. 
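To make the acceptance rule concrete, here is a small self-contained check (not part of the patch) that applies the same pattern to a few candidate 'dir' values; elements starting with '.' (including '.' and '..') and characters such as ';' are rejected:

    #include <iostream>
    #include <regex>

    int main()
    {
        // Same pattern as subDirRegex above.
        const std::string elem = "(?:[a-zA-Z0-9_-]+[a-zA-Z0-9._-]*)";
        const std::regex subDirRegex(elem + "(?:/" + elem + ")*");

        for (std::string s : {"foo", "foo.bar/baz", ".hidden", "foo/../bar", "a;b"})
            std::cout << s << " -> "
                      << (std::regex_match(s, subDirRegex) ? "accepted" : "rejected")
                      << "\n";
    }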
@@ -55,6 +60,8 @@ FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) static std::regex refRegex2(refRegex, std::regex::ECMAScript); + static std::regex subDirRegex2(subDirRegex, std::regex::ECMAScript); + std::cmatch match; if (std::regex_match(uri.c_str(), match, flakeRegex)) { IsAlias d; @@ -100,7 +107,8 @@ FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) throw Error("invalid Git ref '%s'", value); ref = value; } else if (name == "dir") { - // FIXME: validate value; should not contain relative paths + if (!std::regex_match(value, subDirRegex2)) + throw Error("flake '%s' has invalid subdirectory '%s'", uri, value); subdir = value; } else // FIXME: should probably pass through unknown parameters @@ -124,6 +132,7 @@ FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) std::string FlakeRef::to_string() const { std::string string; + if (auto refData = std::get_if(&data)) string = refData->alias; @@ -142,9 +151,12 @@ std::string FlakeRef::to_string() const else abort(); + // FIXME: need to use ?rev etc. for IsGit URIs. string += (ref ? "/" + *ref : "") + (rev ? "/" + rev->to_string(Base16, false) : ""); + if (subdir != "") string += "?dir=" + subdir; + return string; } From a37436d7929f37fb390837419d166a81559abb3e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 1 May 2019 20:38:41 +0200 Subject: [PATCH 097/634] Accept empty directories --- src/libexpr/primops/flake.cc | 5 ++++- src/libexpr/primops/flakeref.cc | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 293e5ad0b..de692f167 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -285,7 +285,10 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe + "/" + flake.sourceInfo.rev->to_string(Base16, false)); } - Path flakeFile = sourceInfo.storePath + "/" + resolvedRef.subdir + "/flake.nix"; + // Guard against symlink attacks. + auto flakeFile = canonPath(sourceInfo.storePath + "/" + resolvedRef.subdir + "/flake.nix"); + if (!isInDir(flakeFile, sourceInfo.storePath)) + throw Error("flake file '%s' escapes from '%s'", resolvedRef, sourceInfo.storePath); if (!pathExists(flakeFile)) throw Error("source tree referenced by '%s' does not contain a '%s/flake.nix' file", resolvedRef, resolvedRef.subdir); diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index 56ba58d09..b7a20a170 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -107,7 +107,7 @@ FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) throw Error("invalid Git ref '%s'", value); ref = value; } else if (name == "dir") { - if (!std::regex_match(value, subDirRegex2)) + if (value != "" && !std::regex_match(value, subDirRegex2)) throw Error("flake '%s' has invalid subdirectory '%s'", uri, value); subdir = value; } else From 5d6e8c008ba8716f16ddfad954663d9e732f8556 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 1 May 2019 20:43:16 +0200 Subject: [PATCH 098/634] Allow 'dir' parameter in github: URIs E.g. 'github:edolstra/dwarffs/flake?dir=foo/bar'. --- src/libexpr/primops/flakeref.cc | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index b7a20a170..141d61c0d 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -33,12 +33,14 @@ const static std::string pathRegex = "/?" 
+ segmentRegex + "(?:/" + segmentRegex // FIXME: support escaping in query string. // Note: '/' is not a valid query parameter, but so what... const static std::string paramRegex = "[a-z]+=[/a-zA-Z0-9._-]*"; +const static std::string paramsRegex = "(?:[?](" + paramRegex + "(?:&" + paramRegex + ")*))"; // 'dir' path elements cannot start with a '.'. We also reject // potentially dangerous characters like ';'. const static std::string subDirElemRegex = "(?:[a-zA-Z0-9_-]+[a-zA-Z0-9._-]*)"; const static std::string subDirRegex = subDirElemRegex + "(?:/" + subDirElemRegex + ")*"; + FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) { // FIXME: could combine this into one regex. @@ -48,14 +50,15 @@ FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) std::regex::ECMAScript); static std::regex githubRegex( - "github:(" + ownerRegex + ")/(" + repoRegex + ")(?:/" + revOrRefRegex + ")?", + "github:(" + ownerRegex + ")/(" + repoRegex + ")(?:/" + revOrRefRegex + ")?" + + paramsRegex + "?", std::regex::ECMAScript); static std::regex uriRegex( "((" + schemeRegex + "):" + "(?://(" + authorityRegex + "))?" + "(" + pathRegex + "))" + - "(?:[?](" + paramRegex + "(?:&" + paramRegex + ")*))?", + paramsRegex + "?", std::regex::ECMAScript); static std::regex refRegex2(refRegex, std::regex::ECMAScript); @@ -85,6 +88,18 @@ FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) else if (match[4].matched) { ref = match[4]; } + for (auto & param : tokenizeString(match[5], "&")) { + auto n = param.find('='); + assert(n != param.npos); + std::string name(param, 0, n); + std::string value(param, n + 1); + if (name == "dir") { + if (value != "" && !std::regex_match(value, subDirRegex2)) + throw Error("flake '%s' has invalid subdirectory '%s'", uri, value); + subdir = value; + } else + throw Error("invalid Git flake reference parameter '%s', in '%s'", name, uri); + } data = d; } From e0d4aa75fc8957347215f3c52366f47f1e3d6d6e Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Thu, 2 May 2019 08:40:00 +0200 Subject: [PATCH 099/634] Fixed compile errors --- src/libexpr/primops/flake.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index de692f167..0d9ef36ba 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -370,7 +370,7 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, LockFile lockFile; if (isTopFlake) - lockFile = readLockFile(flake.sourceInfo.storePath + "/" + flake.resolvedRef.subdir + "/flake.lock"); // FIXME: symlink attack + lockFile = readLockFile(flake.sourceInfo.storePath + "/" + flake.ref.subdir + "/flake.lock"); // FIXME: symlink attack ResolvedFlake deps(flake); @@ -414,13 +414,13 @@ void updateLockFile(EvalState & state, const FlakeUri & flakeUri) { // FIXME: We are writing the lockfile to the store here! Very bad practice! FlakeRef flakeRef = FlakeRef(flakeUri); - if (auto refData = std::get_if(flakeRef)) { + if (auto refData = std::get_if(&flakeRef.data)) { auto lockFile = makeLockFile(state, flakeRef); writeLockFile(lockFile, refData->path + "/" + flakeRef.subdir + "/flake.lock"); // Hack: Make sure that flake.lock is visible to Git. Otherwise, // exportGit will fail to copy it to the Nix store. 
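// ('subDir' in the previous patch does not compile: the member added to
// FlakeRef in flakeref.hh is spelled 'subdir'. That is one of the errors
// this patch corrects, just below.)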
- runProgram("git", true, { "-C", refData->path, "add", flakeRef.subDir + "/flake.lock" }); + runProgram("git", true, { "-C", refData->path, "add", flakeRef.subdir + "/flake.lock" }); } else throw Error("flakeUri %s can't be updated because it is not a path", flakeUri); } From 2b8c63f303de7d1da133cb3e9a00d509dca40f57 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 2 May 2019 20:22:14 +0200 Subject: [PATCH 100/634] Add 'nix dev-shell' and 'nix print-dev-env' command 'nix dev-shell' is intended to replace nix-shell. It supports flakes, e.g. $ nix dev-shell nixpkgs:hello starts a bash shell providing an environment for building 'hello'. Like Lorri (and unlike nix-shell), it computes the build environment by building a modified top-level derivation that writes the environment after running $stdenv/setup to $out and exits. This provides some caching, so it's faster than nix-shell in some cases (especially for packages with lots of dependencies, where the setup script takes a long time). There also is a command 'nix print-dev-env' that prints out shell code for setting up the build environment in an existing shell, e.g. $ . <(nix print-dev-env nixpkgs:hello) https://github.com/tweag/nix/issues/21 --- src/nix/shell.cc | 276 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 276 insertions(+) create mode 100644 src/nix/shell.cc diff --git a/src/nix/shell.cc b/src/nix/shell.cc new file mode 100644 index 000000000..14d88faeb --- /dev/null +++ b/src/nix/shell.cc @@ -0,0 +1,276 @@ +#include "eval.hh" +#include "command.hh" +#include "common-args.hh" +#include "shared.hh" +#include "store-api.hh" +#include "derivations.hh" +#include "affinity.hh" +#include "progress-bar.hh" + +using namespace nix; + +struct BuildEnvironment +{ + // FIXME: figure out which vars should be exported. + std::map env; + std::map functions; +}; + +BuildEnvironment readEnvironment(const Path & path) +{ + BuildEnvironment res; + + auto lines = tokenizeString(readFile(path), "\n"); + + auto getLine = + [&]() { + if (lines.empty()) + throw Error("shell environment '%s' ends unexpectedly", path); + auto line = lines.front(); + lines.pop_front(); + return line; + }; + + while (!lines.empty()) { + auto line = getLine(); + + auto eq = line.find('='); + if (eq != std::string::npos) { + std::string name(line, 0, eq); + std::string value(line, eq + 1); + // FIXME: parse arrays + res.env.insert({name, value}); + } + + else if (hasSuffix(line, " () ")) { + std::string name(line, 0, line.size() - 4); + // FIXME: validate name + auto l = getLine(); + if (l != "{ ") throw Error("shell environment '%s' has unexpected line '%s'", path, l); + std::string body; + while ((l = getLine()) != "}") { + body += l; + body += '\n'; + } + res.functions.insert({name, body}); + } + + else throw Error("shell environment '%s' has unexpected line '%s'", path, line); + } + + return res; +} + +/* Given an existing derivation, return the shell environment as + initialised by stdenv's setup script. We do this by building a + modified derivation with the same dependencies and nearly the same + initial environment variables, that just writes the resulting + environment to a file and exits. */ +BuildEnvironment getDerivationEnvironment(ref store, Derivation drv) +{ + auto builder = baseNameOf(drv.builder); + if (builder != "bash") + throw Error("'nix shell' only works on derivations that use 'bash' as their builder"); + + drv.args = {"-c", "set -e; if [[ -n $stdenv ]]; then source $stdenv/setup; fi; set > $out"}; + + /* Remove derivation checks. 
*/ + drv.env.erase("allowedReferences"); + drv.env.erase("allowedRequisites"); + drv.env.erase("disallowedReferences"); + drv.env.erase("disallowedRequisites"); + + // FIXME: handle structured attrs + + /* Rehash and write the derivation. FIXME: would be nice to use + 'buildDerivation', but that's privileged. */ + auto drvName = drv.env["name"] + "-env"; + for (auto & output : drv.outputs) + drv.env.erase(output.first); + drv.env["out"] = ""; + drv.env["outputs"] = "out"; + drv.outputs["out"] = DerivationOutput("", "", ""); + Hash h = hashDerivationModulo(*store, drv); + Path shellOutPath = store->makeOutputPath("out", h, drvName); + drv.outputs["out"].path = shellOutPath; + drv.env["out"] = shellOutPath; + Path shellDrvPath2 = writeDerivation(store, drv, drvName); + + /* Build the derivation. */ + store->buildPaths({shellDrvPath2}); + + assert(store->isValidPath(shellOutPath)); + + return readEnvironment(shellOutPath); +} + +struct Common : InstallableCommand +{ + /* + std::set keepVars{ + "DISPLAY", + "HOME", + "IN_NIX_SHELL", + "LOGNAME", + "NIX_BUILD_SHELL", + "PAGER", + "PATH", + "TERM", + "TZ", + "USER", + }; + */ + + std::set ignoreVars{ + "BASHOPTS", + "EUID", + "NIX_BUILD_TOP", + "PPID", + "PWD", + "SHELLOPTS", + "SHLVL", + "TEMP", + "TEMPDIR", + "TMP", + "TMPDIR", + "UID", + }; + + void makeRcScript(const BuildEnvironment & buildEnvironment, std::ostream & out) + { + for (auto & i : buildEnvironment.env) { + // FIXME: shellEscape + // FIXME: figure out what to export + // FIXME: handle arrays + if (!ignoreVars.count(i.first) && !hasPrefix(i.first, "BASH_")) + out << fmt("export %s=%s\n", i.first, i.second); + } + + for (auto & i : buildEnvironment.functions) { + out << fmt("%s () {\n%s\n}\n", i.first, i.second); + } + + // FIXME: set outputs + + out << "export NIX_BUILD_TOP=\"$(mktemp -d --tmpdir nix-shell.XXXXXX)\"\n"; + for (auto & i : {"TMP", "TMPDIR", "TEMP", "TEMPDIR"}) + out << fmt("export %s=\"$NIX_BUILD_TOP\"\n", i); + } +}; + +std::pair createTempFile(const Path & prefix = "nix") +{ + Path tmpl(getEnv("TMPDIR", "/tmp") + "/" + prefix + ".XXXXXX"); + // Strictly speaking, this is UB, but who cares... 
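// (mkstemp() rewrites the trailing "XXXXXX" in place, which is why the
// const is cast away here; std::string's buffer is contiguous and writable
// in practice, so this works even though writing through c_str() is
// formally undefined behaviour.)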
+ AutoCloseFD fd(mkstemp((char *) tmpl.c_str())); + if (!fd) + throw SysError("creating temporary file '%s'", tmpl); + return {std::move(fd), tmpl}; +} + +struct CmdDevShell : Common +{ + + std::string name() override + { + return "dev-shell"; + } + + std::string description() override + { + return "run a bash shell that provides the build environment of a derivation"; + } + + Examples examples() override + { + return { + Example{ + "To get the build environment of GNU hello:", + "nix dev-shell nixpkgs:hello" + }, + Example{ + "To get the build environment of the default package of flake in the current directory:", + "nix dev-shell" + }, + }; + } + + void run(ref store) override + { + auto drvs = toDerivations(store, {installable}); + + if (drvs.size() != 1) + throw Error("'%s' needs to evaluate to a single derivation, but it evaluated to %d derivations", + installable->what(), drvs.size()); + + auto & drvPath = *drvs.begin(); + + auto buildEnvironment = getDerivationEnvironment(store, store->derivationFromPath(drvPath)); + + auto [rcFileFd, rcFilePath] = createTempFile("nix-shell"); + + std::ostringstream ss; + makeRcScript(buildEnvironment, ss); + + ss << fmt("rm -f '%s'\n", rcFilePath); + + writeFull(rcFileFd.get(), ss.str()); + + stopProgressBar(); + + auto shell = getEnv("SHELL", "bash"); + + auto args = Strings{baseNameOf(shell), "--rcfile", rcFilePath}; + + restoreAffinity(); + restoreSignals(); + + execvp(shell.c_str(), stringsToCharPtrs(args).data()); + + throw SysError("executing shell '%s'", shell); + } +}; + +struct CmdPrintDevEnv : Common +{ + + std::string name() override + { + return "print-dev-env"; + } + + std::string description() override + { + return "print shell code that can be sourced by bash to reproduce the build environment of a derivation"; + } + + Examples examples() override + { + return { + Example{ + "To apply the build environment of GNU hello to the current shell:", + ". 
<(nix print-dev-env nixpkgs:hello)" + }, + }; + } + + void run(ref store) override + { + auto drvs = toDerivations(store, {installable}); + + if (drvs.size() != 1) + throw Error("'%s' needs to evaluate to a single derivation, but it evaluated to %d derivations", + installable->what(), drvs.size()); + + auto & drvPath = *drvs.begin(); + + auto buildEnvironment = getDerivationEnvironment(store, store->derivationFromPath(drvPath)); + + stopProgressBar(); + + makeRcScript(buildEnvironment, std::cout); + } +}; + +static RegisterCommand r1(make_ref()); +static RegisterCommand r2(make_ref()); From 7dcf5b011a0942ecf953f2b607c4c8d0e9e652c7 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 2 May 2019 21:09:52 +0200 Subject: [PATCH 101/634] Add function for quoting strings --- src/libstore/build.cc | 2 +- src/libstore/store-api.cc | 7 +------ src/libutil/util.cc | 22 ---------------------- src/libutil/util.hh | 22 ++++++++++++++++++++-- 4 files changed, 22 insertions(+), 31 deletions(-) diff --git a/src/libstore/build.cc b/src/libstore/build.cc index 53a0c743b..9730c75e2 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -1805,7 +1805,7 @@ void DerivationGoal::startBuilder() concatStringsSep(", ", parsedDrv->getRequiredSystemFeatures()), drvPath, settings.thisSystem, - concatStringsSep(", ", settings.systemFeatures)); + concatStringsSep(", ", settings.systemFeatures)); if (drv->isBuiltin()) preloadNSS(); diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index c13ff1156..8fabeeea4 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -726,12 +726,7 @@ ValidPathInfo decodeValidPathInfo(std::istream & str, bool hashGiven) string showPaths(const PathSet & paths) { - string s; - for (auto & i : paths) { - if (s.size() != 0) s += ", "; - s += "'" + i + "'"; - } - return s; + return concatStringsSep(", ", quoteStrings(paths)); } diff --git a/src/libutil/util.cc b/src/libutil/util.cc index f4f86c5c8..5598415f5 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -1167,28 +1167,6 @@ template StringSet tokenizeString(const string & s, const string & separators); template vector tokenizeString(const string & s, const string & separators); -string concatStringsSep(const string & sep, const Strings & ss) -{ - string s; - for (auto & i : ss) { - if (s.size() != 0) s += sep; - s += i; - } - return s; -} - - -string concatStringsSep(const string & sep, const StringSet & ss) -{ - string s; - for (auto & i : ss) { - if (s.size() != 0) s += sep; - s += i; - } - return s; -} - - string chomp(const string & s) { size_t i = s.find_last_not_of(" \n\r\t"); diff --git a/src/libutil/util.hh b/src/libutil/util.hh index 9f239bff3..8bd57d2e4 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -334,8 +334,26 @@ template C tokenizeString(const string & s, const string & separators = /* Concatenate the given strings with a separator between the elements. */ -string concatStringsSep(const string & sep, const Strings & ss); -string concatStringsSep(const string & sep, const StringSet & ss); +template +string concatStringsSep(const string & sep, const C & ss) +{ + string s; + for (auto & i : ss) { + if (s.size() != 0) s += sep; + s += i; + } + return s; +} + + +/* Add quotes around a collection of strings. */ +template Strings quoteStrings(const C & c) +{ + Strings res; + for (auto & s : c) + res.push_back("'" + s + "'"); + return res; +} /* Remove trailing whitespace from a string. 
*/ From 2919c496ea10a99d08baffbef485cfb719233b9f Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 2 May 2019 21:10:13 +0200 Subject: [PATCH 102/634] nix dev-shell: Use 'provides.devShell' by default Thus $ nix dev-shell will now build the 'provides.devShell' attribute from the flake in the current directory. If it doesn't exist, it falls back to 'provides.defaultPackage'. --- flake.nix | 5 +++++ shell.nix | 6 ++++-- src/nix/command.hh | 5 +++++ src/nix/installables.cc | 42 ++++++++++++++++++++++++++++++----------- src/nix/shell.cc | 6 ++++++ 5 files changed, 51 insertions(+), 13 deletions(-) diff --git a/flake.nix b/flake.nix index 95ec5d952..ab316c7c6 100644 --- a/flake.nix +++ b/flake.nix @@ -17,5 +17,10 @@ packages.nix = hydraJobs.build.x86_64-linux; defaultPackage = packages.nix; + + devShell = import ./shell.nix { + nixpkgs = deps.nixpkgs; + }; + }; } diff --git a/shell.nix b/shell.nix index 8167f87a2..d7e63bad3 100644 --- a/shell.nix +++ b/shell.nix @@ -1,6 +1,8 @@ -{ useClang ? false }: +{ useClang ? false +, nixpkgs ? builtins.fetchTarball https://github.com/NixOS/nixpkgs-channels/archive/nixos-19.03.tar.gz +}: -with import (builtins.fetchTarball https://github.com/NixOS/nixpkgs-channels/archive/nixos-19.03.tar.gz) {}; +with import nixpkgs { system = builtins.currentSystem or "x86_64-linux"; }; with import ./release-common.nix { inherit pkgs; }; diff --git a/src/nix/command.hh b/src/nix/command.hh index 6d43261ac..640c6cd16 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -88,6 +88,11 @@ struct SourceExprCommand : virtual Args, StoreCommand, MixEvalArgs std::shared_ptr parseInstallable( ref store, const std::string & installable); + virtual Strings getDefaultFlakeAttrPaths() + { + return {"defaultPackage"}; + } + private: std::shared_ptr evalState; diff --git a/src/nix/installables.cc b/src/nix/installables.cc index c3ca87aa7..db67952e1 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -142,13 +142,18 @@ struct InstallableAttrPath : InstallableValue struct InstallableFlake : InstallableValue { FlakeRef flakeRef; - std::string attrPath; + Strings attrPaths; + bool searchPackages = false; - InstallableFlake(SourceExprCommand & cmd, FlakeRef && flakeRef, const std::string & attrPath) - : InstallableValue(cmd), flakeRef(flakeRef), attrPath(attrPath) + InstallableFlake(SourceExprCommand & cmd, FlakeRef && flakeRef, Strings attrPaths) + : InstallableValue(cmd), flakeRef(flakeRef), attrPaths(std::move(attrPaths)) { } - std::string what() override { return flakeRef.to_string() + ":" + attrPath; } + InstallableFlake(SourceExprCommand & cmd, FlakeRef && flakeRef, std::string attrPath) + : InstallableValue(cmd), flakeRef(flakeRef), attrPaths{attrPath}, searchPackages(true) + { } + + std::string what() override { return flakeRef.to_string() + ":" + *attrPaths.begin(); } Value * toValue(EvalState & state) override { @@ -166,18 +171,31 @@ struct InstallableFlake : InstallableValue auto emptyArgs = state.allocBindings(0); - if (auto aPackages = *vProvides->attrs->get(state.symbols.create("packages"))) { + // As a convenience, look for the attribute in + // 'provides.packages'. + if (searchPackages) { + if (auto aPackages = *vProvides->attrs->get(state.symbols.create("packages"))) { + try { + auto * v = findAlongAttrPath(state, *attrPaths.begin(), *emptyArgs, *aPackages->value); + state.forceValue(*v); + return v; + } catch (AttrPathNotFound & e) { + } + } + } + + // Otherwise, look for it in 'provides'. 
+ for (auto & attrPath : attrPaths) { try { - auto * v = findAlongAttrPath(state, attrPath, *emptyArgs, *aPackages->value); + auto * v = findAlongAttrPath(state, attrPath, *emptyArgs, *vProvides); state.forceValue(*v); return v; } catch (AttrPathNotFound & e) { } } - auto * v = findAlongAttrPath(state, attrPath, *emptyArgs, *vProvides); - state.forceValue(*v); - return v; + throw Error("flake '%s' does not provide attribute %s", + flakeRef, concatStringsSep(", ", quoteStrings(attrPaths))); } }; @@ -216,7 +234,8 @@ std::vector> SourceExprCommand::parseInstallables( else if (hasPrefix(s, "nixpkgs.")) { bool static warned; warnOnce(warned, "the syntax 'nixpkgs.' is deprecated; use 'nixpkgs:' instead"); - result.push_back(std::make_shared(*this, FlakeRef("nixpkgs"), std::string(s, 8))); + result.push_back(std::make_shared(*this, FlakeRef("nixpkgs"), + Strings{"packages." + std::string(s, 8)})); } else if ((colon = s.rfind(':')) != std::string::npos) { @@ -233,7 +252,8 @@ std::vector> SourceExprCommand::parseInstallables( if (storePath != "") result.push_back(std::make_shared(storePath)); else - result.push_back(std::make_shared(*this, FlakeRef(s, true), "defaultPackage")); + result.push_back(std::make_shared(*this, FlakeRef(s, true), + getDefaultFlakeAttrPaths())); } else diff --git a/src/nix/shell.cc b/src/nix/shell.cc index 14d88faeb..2e17111d6 100644 --- a/src/nix/shell.cc +++ b/src/nix/shell.cc @@ -125,6 +125,7 @@ struct Common : InstallableCommand "BASHOPTS", "EUID", "NIX_BUILD_TOP", + "NIX_ENFORCE_PURITY", "PPID", "PWD", "SHELLOPTS", @@ -156,6 +157,11 @@ struct Common : InstallableCommand for (auto & i : {"TMP", "TMPDIR", "TEMP", "TEMPDIR"}) out << fmt("export %s=\"$NIX_BUILD_TOP\"\n", i); } + + Strings getDefaultFlakeAttrPaths() override + { + return {"devShell", "defaultPackage"}; + } }; std::pair createTempFile(const Path & prefix = "nix") From dea18ff99913349ef0f54b80d45c8dfdc8b31f65 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 2 May 2019 21:13:19 +0200 Subject: [PATCH 103/634] nix dev-shell: Execute shellHook --- src/nix/shell.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/nix/shell.cc b/src/nix/shell.cc index 2e17111d6..d3ecf8de4 100644 --- a/src/nix/shell.cc +++ b/src/nix/shell.cc @@ -156,6 +156,8 @@ struct Common : InstallableCommand out << "export NIX_BUILD_TOP=\"$(mktemp -d --tmpdir nix-shell.XXXXXX)\"\n"; for (auto & i : {"TMP", "TMPDIR", "TEMP", "TEMPDIR"}) out << fmt("export %s=\"$NIX_BUILD_TOP\"\n", i); + + out << "eval \"$shellHook\"\n"; } Strings getDefaultFlakeAttrPaths() override From 8ec77614f63e14d1869734b0d21a646667bbf88b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 2 May 2019 21:28:41 +0200 Subject: [PATCH 104/634] Move createTempFile to libutil --- src/libutil/util.cc | 11 +++++++++++ src/libutil/util.hh | 12 ++++++++---- src/nix/shell.cc | 10 ---------- 3 files changed, 19 insertions(+), 14 deletions(-) diff --git a/src/libutil/util.cc b/src/libutil/util.cc index 5598415f5..75b73fcfa 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -461,6 +461,17 @@ Path createTempDir(const Path & tmpRoot, const Path & prefix, } +std::pair createTempFile(const Path & prefix) +{ + Path tmpl(getEnv("TMPDIR", "/tmp") + "/" + prefix + ".XXXXXX"); + // Strictly speaking, this is UB, but who cares... 
+ AutoCloseFD fd(mkstemp((char *) tmpl.c_str())); + if (!fd) + throw SysError("creating temporary file '%s'", tmpl); + return {std::move(fd), tmpl}; +} + + static Lazy getHome2([]() { Path homeDir = getEnv("HOME"); if (homeDir.empty()) { diff --git a/src/libutil/util.hh b/src/libutil/util.hh index 8bd57d2e4..6c9d7c2eb 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -118,10 +118,6 @@ void deletePath(const Path & path); void deletePath(const Path & path, unsigned long long & bytesFreed); -/* Create a temporary directory. */ -Path createTempDir(const Path & tmpRoot = "", const Path & prefix = "nix", - bool includePid = true, bool useGlobalCounter = true, mode_t mode = 0755); - /* Return $HOME or the user's home directory from /etc/passwd. */ Path getHome(); @@ -199,6 +195,14 @@ public: }; +/* Create a temporary directory. */ +Path createTempDir(const Path & tmpRoot = "", const Path & prefix = "nix", + bool includePid = true, bool useGlobalCounter = true, mode_t mode = 0755); + +/* Create a temporary file, returning a file handle and its path. */ +std::pair createTempFile(const Path & prefix = "nix"); + + class Pipe { public: diff --git a/src/nix/shell.cc b/src/nix/shell.cc index d3ecf8de4..95028f10e 100644 --- a/src/nix/shell.cc +++ b/src/nix/shell.cc @@ -166,16 +166,6 @@ struct Common : InstallableCommand } }; -std::pair createTempFile(const Path & prefix = "nix") -{ - Path tmpl(getEnv("TMPDIR", "/tmp") + "/" + prefix + ".XXXXXX"); - // Strictly speaking, this is UB, but who cares... - AutoCloseFD fd(mkstemp((char *) tmpl.c_str())); - if (!fd) - throw SysError("creating temporary file '%s'", tmpl); - return {std::move(fd), tmpl}; -} - struct CmdDevShell : Common { From 7ba0f98e644c31ce0c16db10aa87f896937e0ddf Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 2 May 2019 21:28:52 +0200 Subject: [PATCH 105/634] nix dev-shell: Less purity --- src/nix/shell.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/nix/shell.cc b/src/nix/shell.cc index 95028f10e..0813d122c 100644 --- a/src/nix/shell.cc +++ b/src/nix/shell.cc @@ -124,6 +124,7 @@ struct Common : InstallableCommand std::set ignoreVars{ "BASHOPTS", "EUID", + "HOME", // FIXME: don't ignore in pure mode? "NIX_BUILD_TOP", "NIX_ENFORCE_PURITY", "PPID", @@ -134,11 +135,15 @@ struct Common : InstallableCommand "TEMPDIR", "TMP", "TMPDIR", + "TZ", "UID", }; void makeRcScript(const BuildEnvironment & buildEnvironment, std::ostream & out) { + out << "export IN_NIX_SHELL=1\n"; + out << "nix_saved_PATH=\"$PATH\"\n"; + for (auto & i : buildEnvironment.env) { // FIXME: shellEscape // FIXME: figure out what to export @@ -147,6 +152,8 @@ struct Common : InstallableCommand out << fmt("export %s=%s\n", i.first, i.second); } + out << "PATH=\"$PATH:$nix_saved_PATH\"\n"; + for (auto & i : buildEnvironment.functions) { out << fmt("%s () {\n%s\n}\n", i.first, i.second); } From f8c4742c2f92c7ba4dbe0911fb91ddce33c6f98a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 3 May 2019 13:15:13 +0200 Subject: [PATCH 106/634] Fix 'git add' when subdir is empty --- src/libexpr/primops/flake.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 1c20d33bb..ab797fe01 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -430,7 +430,9 @@ void updateLockFile(EvalState & state, const FlakeUri & flakeUri) // Hack: Make sure that flake.lock is visible to Git. Otherwise, // exportGit will fail to copy it to the Nix store. 
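// (When the flake lives at the top of the repository, subdir is empty and
// the old expression passed the absolute path "/flake.lock" to git, which
// refuses paths outside the repository; hence the conditional added below.)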
- runProgram("git", true, { "-C", refData->path, "add", flakeRef.subdir + "/flake.lock" }); + runProgram("git", true, + { "-C", refData->path, "add", + (flakeRef.subdir == "" ? "" : flakeRef.subdir + "/") + "flake.lock" }); } else throw Error("flakeUri %s can't be updated because it is not a path", flakeUri); } From 201f92e02c3a5d58d27ab9a7aca3b76d56d18264 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Wed, 1 May 2019 11:38:48 +0200 Subject: [PATCH 107/634] Fixed Flake data type and flake fetching --- src/libexpr/primops/flake.cc | 110 +++++++++++++++-------------------- src/libexpr/primops/flake.hh | 30 ++++++---- src/nix/flake.cc | 55 ++++++++++++------ 3 files changed, 102 insertions(+), 93 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index ab797fe01..89267e2b1 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -204,28 +204,32 @@ static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const return flakeRef; } -static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef fRef, bool impureIsAllowed = false) +// Lookups happen here too +static SourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowed = false) { - if (evalSettings.pureEval && !impureIsAllowed && !fRef.isImmutable()) - throw Error("requested to fetch mutable flake '%s' in pure mode", fRef); + FlakeRef resolvedRef = lookupFlake(state, flakeRef, + impureIsAllowed ? state.getFlakeRegistries() : std::vector>()); + + if (evalSettings.pureEval && !impureIsAllowed && !resolvedRef.isImmutable()) + throw Error("requested to fetch mutable flake '%s' in pure mode", resolvedRef); // This only downloads only one revision of the repo, not the entire history. - if (auto refData = std::get_if(&fRef.data)) { + if (auto refData = std::get_if(&resolvedRef.data)) { // FIXME: use regular /archive URLs instead? api.github.com // might have stricter rate limits. auto url = fmt("https://api.github.com/repos/%s/%s/tarball/%s", refData->owner, refData->repo, - fRef.rev ? fRef.rev->to_string(Base16, false) - : fRef.ref ? *fRef.ref : "master"); + resolvedRef.rev ? resolvedRef.rev->to_string(Base16, false) + : resolvedRef.ref ? *resolvedRef.ref : "master"); std::string accessToken = settings.githubAccessToken.get(); if (accessToken != "") url += "?access_token=" + accessToken; auto result = getDownloader()->downloadCached(state.store, url, true, "source", - Hash(), nullptr, fRef.rev ? 1000000000 : settings.tarballTtl); + Hash(), nullptr, resolvedRef.rev ? 
1000000000 : settings.tarballTtl); if (!result.etag) throw Error("did not receive an ETag header from '%s'", url); @@ -233,72 +237,60 @@ static FlakeSourceInfo fetchFlake(EvalState & state, const FlakeRef fRef, bool i if (result.etag->size() != 42 || (*result.etag)[0] != '"' || (*result.etag)[41] != '"') throw Error("ETag header '%s' from '%s' is not a Git revision", *result.etag, url); - FlakeSourceInfo info(fRef); + std::string rev = std::string(*result.etag, 1, result.etag->size() - 2); + const FlakeRef ref(resolvedRef.baseRef().to_string() + "/" + rev); + SourceInfo info(ref); info.storePath = result.path; - info.rev = Hash(std::string(*result.etag, 1, result.etag->size() - 2), htSHA1); - info.flakeRef.rev = info.rev; - info.flakeRef.ref = {}; return info; } // This downloads the entire git history - else if (auto refData = std::get_if(&fRef.data)) { - auto gitInfo = exportGit(state.store, refData->uri, fRef.ref, fRef.rev, "source"); - FlakeSourceInfo info(fRef); + else if (auto refData = std::get_if(&resolvedRef.data)) { + auto gitInfo = exportGit(state.store, refData->uri, resolvedRef.ref, resolvedRef.rev, "source"); + const FlakeRef ref(resolvedRef.baseRef().to_string() + "/" + gitInfo.ref + "/" + gitInfo.rev.to_string(Base16, false)); + SourceInfo info(ref); info.storePath = gitInfo.storePath; - info.rev = gitInfo.rev; info.revCount = gitInfo.revCount; - info.flakeRef.ref = gitInfo.ref; - info.flakeRef.rev = info.rev; return info; } - else if (auto refData = std::get_if(&fRef.data)) { + else if (auto refData = std::get_if(&resolvedRef.data)) { if (!pathExists(refData->path + "/.git")) throw Error("flake '%s' does not reference a Git repository", refData->path); auto gitInfo = exportGit(state.store, refData->path, {}, {}, "source"); - FlakeSourceInfo info(fRef); + const FlakeRef ref(resolvedRef.baseRef().to_string() + "/" + gitInfo.ref + "/" + gitInfo.rev.to_string(Base16, false)); + SourceInfo info(ref); info.storePath = gitInfo.storePath; - info.rev = gitInfo.rev; info.revCount = gitInfo.revCount; - info.flakeRef.ref = gitInfo.ref; - info.flakeRef.rev = info.rev; return info; } else abort(); } -// This will return the flake which corresponds to a given FlakeRef. The lookupFlake is done within this function. +// This will return the flake which corresponds to a given FlakeRef. The lookupFlake is done within `fetchFlake`, which is used here. Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowed = false) { - FlakeRef resolvedRef = lookupFlake(state, flakeRef, - impureIsAllowed ? state.getFlakeRegistries() : std::vector>()); + SourceInfo sourceInfo = fetchFlake(state, flakeRef, impureIsAllowed); + debug("got flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string()); - FlakeSourceInfo sourceInfo = fetchFlake(state, resolvedRef, impureIsAllowed); - debug("got flake source '%s' with revision %s", - sourceInfo.storePath, sourceInfo.rev.value_or(Hash(htSHA1)).to_string(Base16, false)); + FlakeRef resolvedRef = sourceInfo.resolvedRef; - resolvedRef = sourceInfo.flakeRef; // `resolvedRef` is now immutable + resolvedRef = sourceInfo.resolvedRef; // `resolvedRef` is now immutable state.store->assertStorePath(sourceInfo.storePath); if (state.allowedPaths) state.allowedPaths->insert(sourceInfo.storePath); - Flake flake(resolvedRef, std::move(sourceInfo)); - if (std::get_if(&resolvedRef.data)) { - // FIXME: ehm? 
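// (The rev-pinning previously done here is now handled inside fetchFlake,
// which returns a SourceInfo whose resolvedRef already carries the fetched
// ref and revision, so the block below can simply be dropped.)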
- if (flake.sourceInfo.rev) - flake.ref = FlakeRef(resolvedRef.baseRef().to_string() - + "/" + flake.sourceInfo.rev->to_string(Base16, false)); - } - // Guard against symlink attacks. - auto flakeFile = canonPath(sourceInfo.storePath + "/" + resolvedRef.subdir + "/flake.nix"); + Path flakeFile = canonPath(sourceInfo.storePath + "/" + resolvedRef.subdir + "/flake.nix"); if (!isInDir(flakeFile, sourceInfo.storePath)) throw Error("flake file '%s' escapes from '%s'", resolvedRef, sourceInfo.storePath); + + Flake flake(flakeRef, sourceInfo); + if (!pathExists(flakeFile)) throw Error("source tree referenced by '%s' does not contain a '%s/flake.nix' file", resolvedRef, resolvedRef.subdir); @@ -344,24 +336,18 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe // Get the `NonFlake` corresponding to a `FlakeRef`. NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeAlias alias) { - FlakeSourceInfo sourceInfo = fetchFlake(state, flakeRef); - debug("got non-flake source '%s' with revision %s", - sourceInfo.storePath, sourceInfo.rev.value_or(Hash(htSHA1)).to_string(Base16, false)); + SourceInfo sourceInfo = fetchFlake(state, flakeRef); + debug("got non-flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string()); - auto flakePath = sourceInfo.storePath; - state.store->assertStorePath(flakePath); + FlakeRef resolvedRef = sourceInfo.resolvedRef; + + NonFlake nonFlake(flakeRef, sourceInfo); + + nonFlake.storePath = sourceInfo.storePath; + state.store->assertStorePath(nonFlake.storePath); if (state.allowedPaths) - state.allowedPaths->insert(flakePath); - - NonFlake nonFlake(flakeRef); - if (std::get_if(&flakeRef.data)) { - if (sourceInfo.rev) - nonFlake.ref = FlakeRef(flakeRef.baseRef().to_string() - + "/" + sourceInfo.rev->to_string(Base16, false)); - } - - nonFlake.path = flakePath; + state.allowedPaths->insert(nonFlake.storePath); nonFlake.alias = alias; @@ -380,7 +366,7 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, LockFile lockFile; if (isTopFlake) - lockFile = readLockFile(flake.sourceInfo.storePath + "/" + flake.ref.subdir + "/flake.lock"); // FIXME: symlink attack + lockFile = readLockFile(flake.storePath + flake.resolvedRef.subdir + "/flake.lock"); // FIXME: symlink attack ResolvedFlake deps(flake); @@ -399,13 +385,13 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, LockFile::FlakeEntry dependenciesToFlakeEntry(const ResolvedFlake & resolvedFlake) { - LockFile::FlakeEntry entry(resolvedFlake.flake.sourceInfo.flakeRef); + LockFile::FlakeEntry entry(resolvedFlake.flake.resolvedRef); for (auto & newResFlake : resolvedFlake.flakeDeps) - entry.flakeEntries.insert_or_assign(newResFlake.flake.id, dependenciesToFlakeEntry(newResFlake)); + entry.flakeEntries.insert_or_assign(newResFlake.flake.originalRef, dependenciesToFlakeEntry(newResFlake)); for (auto & nonFlake : resolvedFlake.nonFlakeDeps) - entry.nonFlakeEntries.insert_or_assign(nonFlake.alias, nonFlake.ref); + entry.nonFlakeEntries.insert_or_assign(nonFlake.alias, nonFlake.resolvedRef); return entry; } @@ -453,18 +439,18 @@ void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) auto vNonFlake = state.allocAttr(v, nonFlake.alias); state.mkAttrs(*vNonFlake, 4); - state.store->isValidPath(nonFlake.path); - mkString(*state.allocAttr(*vNonFlake, state.sOutPath), nonFlake.path, {nonFlake.path}); + state.store->isValidPath(nonFlake.storePath); + mkString(*state.allocAttr(*vNonFlake, 
state.sOutPath), nonFlake.storePath, {nonFlake.storePath}); } mkString(*state.allocAttr(v, state.sDescription), resFlake.flake.description); - auto & path = resFlake.flake.sourceInfo.storePath; + auto & path = resFlake.flake.storePath; state.store->isValidPath(path); mkString(*state.allocAttr(v, state.sOutPath), path, {path}); - if (resFlake.flake.sourceInfo.revCount) - mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *resFlake.flake.sourceInfo.revCount); + if (resFlake.flake.revCount) + mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *resFlake.flake.revCount); auto vProvides = state.allocAttr(v, state.symbols.create("provides")); mkApp(*vProvides, *resFlake.flake.vProvides, v); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 6329c36ec..f3bf76cc7 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -23,7 +23,7 @@ struct LockFile { FlakeRef ref; std::map flakeEntries; - std::map nonFlakeEntries; + std::map nonFlakeEntries; FlakeEntry(const FlakeRef & flakeRef) : ref(flakeRef) {}; }; @@ -43,37 +43,43 @@ std::shared_ptr readRegistry(const Path &); void writeRegistry(const FlakeRegistry &, const Path &); -struct FlakeSourceInfo +struct SourceInfo { - FlakeRef flakeRef; + FlakeRef resolvedRef; Path storePath; - std::optional rev; std::optional revCount; // date - FlakeSourceInfo(const FlakeRef & flakeRef) : flakeRef(flakeRef) { } + SourceInfo(const FlakeRef & resolvRef) : resolvedRef(resolvRef) {}; }; struct Flake { FlakeId id; - FlakeRef ref; + FlakeRef originalRef; + FlakeRef resolvedRef; std::string description; - FlakeSourceInfo sourceInfo; + std::optional revCount; + Path storePath; std::vector requires; std::map nonFlakeRequires; Value * vProvides; // FIXME: gc - Flake(const FlakeRef & flakeRef, FlakeSourceInfo && sourceInfo) - : ref(flakeRef), sourceInfo(sourceInfo) {}; + // date + // content hash + Flake(const FlakeRef & origRef, const SourceInfo & sourceInfo) : originalRef(origRef), + resolvedRef(sourceInfo.resolvedRef), revCount(sourceInfo.revCount), storePath(sourceInfo.storePath) {}; }; struct NonFlake { FlakeAlias alias; - FlakeRef ref; - Path path; + FlakeRef originalRef; + FlakeRef resolvedRef; + std::optional revCount; + Path storePath; // date // content hash - NonFlake(const FlakeRef flakeRef) : ref(flakeRef) {}; + NonFlake(const FlakeRef & origRef, const SourceInfo & sourceInfo) : originalRef(origRef), + resolvedRef(sourceInfo.resolvedRef), revCount(sourceInfo.revCount), storePath(sourceInfo.storePath) {}; }; std::shared_ptr getGlobalRegistry(); diff --git a/src/nix/flake.cc b/src/nix/flake.cc index e38c4db0b..0af368570 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -45,35 +45,53 @@ void printFlakeInfo(Flake & flake, bool json) { if (json) { nlohmann::json j; j["id"] = flake.id; - j["uri"] = flake.sourceInfo.flakeRef.to_string(); + j["uri"] = flake.resolvedRef.to_string(); j["description"] = flake.description; - if (flake.sourceInfo.rev) - j["revision"] = flake.sourceInfo.rev->to_string(Base16, false); - if (flake.sourceInfo.revCount) - j["revCount"] = *flake.sourceInfo.revCount; - j["path"] = flake.sourceInfo.storePath; + if (flake.resolvedRef.ref) + j["branch"] = *flake.resolvedRef.ref; + if (flake.resolvedRef.rev) + j["revision"] = flake.resolvedRef.rev->to_string(Base16, false); + if (flake.revCount) + j["revCount"] = *flake.revCount; + j["path"] = flake.storePath; std::cout << j.dump(4) << std::endl; } else { std::cout << "ID: " << flake.id << "\n"; - std::cout << "URI: " << 
flake.sourceInfo.flakeRef << "\n"; + std::cout << "URI: " << flake.resolvedRef.to_string() << "\n"; std::cout << "Description: " << flake.description << "\n"; - if (flake.sourceInfo.rev) - std::cout << "Revision: " << flake.sourceInfo.rev->to_string(Base16, false) << "\n"; - if (flake.sourceInfo.revCount) - std::cout << "Revcount: " << *flake.sourceInfo.revCount << "\n"; - std::cout << "Path: " << flake.sourceInfo.storePath << "\n"; + if (flake.resolvedRef.ref) + std::cout << "Branch: " << *flake.resolvedRef.ref; + if (flake.resolvedRef.rev) + std::cout << "Revision: " << flake.resolvedRef.rev->to_string(Base16, false) << "\n"; + if (flake.revCount) + std::cout << "Revcount: " << *flake.revCount << "\n"; + std::cout << "Path: " << flake.storePath << "\n"; } } void printNonFlakeInfo(NonFlake & nonFlake, bool json) { if (json) { nlohmann::json j; - j["name"] = nonFlake.alias; - j["location"] = nonFlake.path; + j["id"] = nonFlake.alias; + j["uri"] = nonFlake.resolvedRef.to_string(); + if (nonFlake.resolvedRef.ref) + j["branch"] = *nonFlake.resolvedRef.ref; + if (nonFlake.resolvedRef.rev) + j["revision"] = nonFlake.resolvedRef.rev->to_string(Base16, false); + if (nonFlake.revCount) + j["revCount"] = *nonFlake.revCount; + j["path"] = nonFlake.storePath; std::cout << j.dump(4) << std::endl; } else { - std::cout << "name: " << nonFlake.alias << "\n"; - std::cout << "Location: " << nonFlake.path << "\n"; + std::cout << "ID: " << nonFlake.alias << "\n"; + std::cout << "URI: " << nonFlake.resolvedRef.to_string() << "\n"; + if (nonFlake.resolvedRef.ref) + std::cout << "Branch: " << *nonFlake.resolvedRef.ref; + if (nonFlake.resolvedRef.rev) + std::cout << "Revision: " << nonFlake.resolvedRef.rev->to_string(Base16, false) << "\n"; + if (nonFlake.revCount) + std::cout << "Revcount: " << *nonFlake.revCount << "\n"; + std::cout << "Path: " << nonFlake.storePath << "\n"; } } @@ -244,14 +262,13 @@ struct CmdFlakePin : virtual Args, StoreCommand, MixEvalArgs FlakeRegistry userRegistry = *readRegistry(userRegistryPath); auto it = userRegistry.entries.find(FlakeRef(alias)); if (it != userRegistry.entries.end()) { - it->second = getFlake(*evalState, it->second, true).ref; - // The 'ref' in 'flake' is immutable. 
+ it->second = getFlake(*evalState, it->second, true).resolvedRef; writeRegistry(userRegistry, userRegistryPath); } else { std::shared_ptr globalReg = getGlobalRegistry(); it = globalReg->entries.find(FlakeRef(alias)); if (it != globalReg->entries.end()) { - FlakeRef newRef = getFlake(*evalState, it->second, true).ref; + FlakeRef newRef = getFlake(*evalState, it->second, true).resolvedRef; userRegistry.entries.insert_or_assign(alias, newRef); writeRegistry(userRegistry, userRegistryPath); } else From c38c726eb5d447c7e9d894d57cd05ac46c173ddd Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 7 May 2019 21:15:45 +0200 Subject: [PATCH 108/634] Fix test failures when $TMPDIR changes --- tests/build-dry.sh | 6 +++--- tests/nix-copy-ssh.sh | 2 +- tests/nix-shell.sh | 8 ++++---- tests/placeholders.sh | 2 -- 4 files changed, 8 insertions(+), 10 deletions(-) diff --git a/tests/build-dry.sh b/tests/build-dry.sh index 610e6070c..e72533e70 100644 --- a/tests/build-dry.sh +++ b/tests/build-dry.sh @@ -8,13 +8,13 @@ clearStore clearCache # Ensure this builds successfully first -nix build -f dependencies.nix +nix build --no-link -f dependencies.nix clearStore clearCache # Try --dry-run using old command first -nix-build dependencies.nix --dry-run 2>&1 | grep "will be built" +nix-build --no-out-link dependencies.nix --dry-run 2>&1 | grep "will be built" # Now new command: nix build -f dependencies.nix --dry-run 2>&1 | grep "will be built" @@ -27,7 +27,7 @@ clearCache # Try --dry-run using new command first nix build -f dependencies.nix --dry-run 2>&1 | grep "will be built" # Now old command: -nix-build dependencies.nix --dry-run 2>&1 | grep "will be built" +nix-build --no-out-link dependencies.nix --dry-run 2>&1 | grep "will be built" fi ################################################### diff --git a/tests/nix-copy-ssh.sh b/tests/nix-copy-ssh.sh index 6aba667a4..eb801548d 100644 --- a/tests/nix-copy-ssh.sh +++ b/tests/nix-copy-ssh.sh @@ -7,7 +7,7 @@ remoteRoot=$TEST_ROOT/store2 chmod -R u+w "$remoteRoot" || true rm -rf "$remoteRoot" -outPath=$(nix-build dependencies.nix) +outPath=$(nix-build --no-out-link dependencies.nix) nix copy --to "ssh://localhost?store=$NIX_STORE_DIR&remote-store=$remoteRoot%3fstore=$NIX_STORE_DIR%26real=$remoteRoot$NIX_STORE_DIR" $outPath diff --git a/tests/nix-shell.sh b/tests/nix-shell.sh index 6024ea399..ee502dddb 100644 --- a/tests/nix-shell.sh +++ b/tests/nix-shell.sh @@ -27,13 +27,13 @@ output=$(nix-shell --pure --keep SELECTED_IMPURE_VAR shell.nix -A shellDrv --run # Test nix-shell on a .drv symlink # Legacy: absolute path and .drv extension required -nix-instantiate shell.nix -A shellDrv --indirect --add-root shell.drv -[[ $(nix-shell --pure $PWD/shell.drv --run \ +nix-instantiate shell.nix -A shellDrv --indirect --add-root $TEST_ROOT/shell.drv +[[ $(nix-shell --pure $TEST_ROOT/shell.drv --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX"') = " - foo - bar" ]] # New behaviour: just needs to resolve to a derivation in the store -nix-instantiate shell.nix -A shellDrv --indirect --add-root shell -[[ $(nix-shell --pure shell --run \ +nix-instantiate shell.nix -A shellDrv --indirect --add-root $TEST_ROOT/shell +[[ $(nix-shell --pure $TEST_ROOT/shell --run \ 'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX"') = " - foo - bar" ]] # Test nix-shell -p diff --git a/tests/placeholders.sh b/tests/placeholders.sh index 071cfe2dc..cd1bb7bc2 100644 --- a/tests/placeholders.sh +++ b/tests/placeholders.sh @@ -18,5 +18,3 @@ nix-build --no-out-link -E ' "; } 
' - -echo XYZZY From 3c171851a8aed22e977a1798dc5d306faa6e5b63 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 7 May 2019 22:29:16 +0200 Subject: [PATCH 109/634] Make the URL/path of the global flake registry configurable --- src/libexpr/eval.cc | 8 ++++++++ src/libexpr/eval.hh | 5 +++++ src/libexpr/primops/flake.cc | 3 +-- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 4bd1280ad..2d83af983 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -1972,6 +1972,14 @@ std::ostream & operator << (std::ostream & str, const ExternalValueBase & v) { EvalSettings evalSettings; +EvalSettings::EvalSettings() +{ + if (flakeRegistry == "") + // FIXME: static initialization order fiasco. But this will go + // away when we switch to an online registry. + flakeRegistry = settings.nixDataDir + "/nix/flake-registry.json"; +} + static GlobalConfig::Register r1(&evalSettings); diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index 44988cd70..b0bf777fc 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -367,6 +367,11 @@ struct EvalSettings : Config Setting allowedUris{this, {}, "allowed-uris", "Prefixes of URIs that builtin functions such as fetchurl and fetchGit are allowed to fetch."}; + + Setting flakeRegistry{this, "", "flake-registry", + "Path or URI of the global flake registry."}; + + EvalSettings(); }; extern EvalSettings evalSettings; diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 89267e2b1..f73ab4424 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -126,8 +126,7 @@ void writeLockFile(const LockFile & lockFile, const Path & path) std::shared_ptr getGlobalRegistry() { - Path registryFile = settings.nixDataDir + "/nix/flake-registry.json"; - return readRegistry(registryFile); + return readRegistry(evalSettings.flakeRegistry); } Path getUserRegistryPath() From 2a41a567e29846cf32d38f338b992069592770c5 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 7 May 2019 23:06:15 +0200 Subject: [PATCH 110/634] Improve FlakeRef::to_string() We were incorrectly using path syntax (i.e. //) for Git repositories. This is only valid for GitHub flakerefs. --- src/libexpr/primops/flakeref.cc | 42 +++++++++++++++++++++++++-------- 1 file changed, 32 insertions(+), 10 deletions(-) diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index 141d61c0d..784a0868e 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -147,31 +147,53 @@ FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) std::string FlakeRef::to_string() const { std::string string; + bool first = true; - if (auto refData = std::get_if(&data)) + auto addParam = + [&](const std::string & name, std::string value) { + string += first ? '?' 
: '&'; + first = false; + string += name; + string += '='; + string += value; // FIXME: escaping + }; + + if (auto refData = std::get_if(&data)) { string = refData->alias; + if (ref) string += '/' + *ref; + if (rev) string += '/' + rev->to_string(Base16, false); + } + + else if (auto refData = std::get_if(&data)) { + assert(subdir == ""); + assert(!rev); + assert(!ref); + return refData->path; + } else if (auto refData = std::get_if(&data)) { assert(!(ref && rev)); string = "github:" + refData->owner + "/" + refData->repo; + if (ref) { string += '/'; string += *ref; } + if (rev) { string += '/'; string += rev->to_string(Base16, false); } + if (subdir != "") addParam("dir", subdir); } else if (auto refData = std::get_if(&data)) { assert(!rev || ref); string = refData->uri; + + if (ref) { + addParam("ref", *ref); + if (rev) + addParam("rev", rev->to_string(Base16, false)); + } + + if (subdir != "") addParam("dir", subdir); } - else if (auto refData = std::get_if(&data)) - return refData->path; - else abort(); - // FIXME: need to use ?rev etc. for IsGit URIs. - string += (ref ? "/" + *ref : "") + - (rev ? "/" + rev->to_string(Base16, false) : ""); - - if (subdir != "") string += "?dir=" + subdir; - return string; } From ddd42b7e948acc1b541741250c9a77e86aaada04 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 7 May 2019 23:20:42 +0200 Subject: [PATCH 111/634] Fix immutable flakeref construction We were appending ref/revs incorrectly for the IsGit case (by appending // rather than ?ref=&rev=size() != 42 || (*result.etag)[0] != '"' || (*result.etag)[41] != '"') throw Error("ETag header '%s' from '%s' is not a Git revision", *result.etag, url); - std::string rev = std::string(*result.etag, 1, result.etag->size() - 2); - const FlakeRef ref(resolvedRef.baseRef().to_string() + "/" + rev); + FlakeRef ref(resolvedRef.baseRef()); + ref.rev = Hash(std::string(*result.etag, 1, result.etag->size() - 2), htSHA1); SourceInfo info(ref); info.storePath = result.path; @@ -247,7 +247,9 @@ static SourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, bool // This downloads the entire git history else if (auto refData = std::get_if(&resolvedRef.data)) { auto gitInfo = exportGit(state.store, refData->uri, resolvedRef.ref, resolvedRef.rev, "source"); - const FlakeRef ref(resolvedRef.baseRef().to_string() + "/" + gitInfo.ref + "/" + gitInfo.rev.to_string(Base16, false)); + FlakeRef ref(resolvedRef.baseRef()); + ref.ref = gitInfo.ref; + ref.rev = gitInfo.rev; SourceInfo info(ref); info.storePath = gitInfo.storePath; info.revCount = gitInfo.revCount; @@ -258,7 +260,9 @@ static SourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, bool if (!pathExists(refData->path + "/.git")) throw Error("flake '%s' does not reference a Git repository", refData->path); auto gitInfo = exportGit(state.store, refData->path, {}, {}, "source"); - const FlakeRef ref(resolvedRef.baseRef().to_string() + "/" + gitInfo.ref + "/" + gitInfo.rev.to_string(Base16, false)); + FlakeRef ref(resolvedRef.baseRef()); + ref.ref = gitInfo.ref; + ref.rev = gitInfo.rev; SourceInfo info(ref); info.storePath = gitInfo.storePath; info.revCount = gitInfo.revCount; From 2d5a21968842c88b425e1a591bd413c484a470e7 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 7 May 2019 23:22:47 +0200 Subject: [PATCH 112/634] Add basic flake tests --- Makefile.config.in | 1 + configure.ac | 1 + release-common.nix | 1 + tests/{config.nix => config.nix.in} | 2 +- tests/flakes.sh | 117 ++++++++++++++++++++++++++++ tests/local.mk | 8 +- 6 files 
changed, 126 insertions(+), 4 deletions(-) rename tests/{config.nix => config.nix.in} (92%) create mode 100644 tests/flakes.sh diff --git a/Makefile.config.in b/Makefile.config.in index 59730b646..f2273eaed 100644 --- a/Makefile.config.in +++ b/Makefile.config.in @@ -35,6 +35,7 @@ prefix = @prefix@ sandbox_shell = @sandbox_shell@ storedir = @storedir@ sysconfdir = @sysconfdir@ +system = @system@ doc_generate = @doc_generate@ xmllint = @xmllint@ xsltproc = @xsltproc@ diff --git a/configure.ac b/configure.ac index f5b1614f1..8087f308b 100644 --- a/configure.ac +++ b/configure.ac @@ -129,6 +129,7 @@ NEED_PROG(gzip, gzip) NEED_PROG(xz, xz) AC_PATH_PROG(dot, dot) AC_PATH_PROG(lsof, lsof, lsof) +NEED_PROG(jq, jq) NEED_PROG(cat, cat) diff --git a/release-common.nix b/release-common.nix index 4c5565985..b745932a7 100644 --- a/release-common.nix +++ b/release-common.nix @@ -56,6 +56,7 @@ rec { # Tests git mercurial + jq ] ++ lib.optionals stdenv.isLinux [libseccomp utillinuxMinimal] ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium diff --git a/tests/config.nix b/tests/config.nix.in similarity index 92% rename from tests/config.nix rename to tests/config.nix.in index 6ba91065b..ff5aeb31a 100644 --- a/tests/config.nix +++ b/tests/config.nix.in @@ -5,7 +5,7 @@ rec { path = coreutils; - system = builtins.currentSystem; + system = "@system@"; shared = builtins.getEnv "_NIX_TEST_SHARED"; diff --git a/tests/flakes.sh b/tests/flakes.sh new file mode 100644 index 000000000..26ffd6a9f --- /dev/null +++ b/tests/flakes.sh @@ -0,0 +1,117 @@ +source common.sh + +if [[ -z $(type -p git) ]]; then + echo "Git not installed; skipping flake tests" + exit 99 +fi + +clearStore + +registry=$TEST_ROOT/registry.json + +flake1=$TEST_ROOT/flake1 +flake2=$TEST_ROOT/flake2 +flake3=$TEST_ROOT/flake3 + +for repo in $flake1 $flake2 $flake3; do + rm -rf $repo + mkdir $repo + git -C $repo init + git -C $repo config user.email "foobar@example.com" + git -C $repo config user.name "Foobar" +done + +cat > $flake1/flake.nix < $flake2/flake.nix < $registry < Date: Wed, 8 May 2019 13:24:37 +0200 Subject: [PATCH 113/634] updateLockFile: Do "git add" in a slightly nicer way "--intent-to-add" ensures the change shows up in "git diff". --- src/libexpr/primops/flake.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 89267e2b1..b451afba4 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -417,7 +417,7 @@ void updateLockFile(EvalState & state, const FlakeUri & flakeUri) // Hack: Make sure that flake.lock is visible to Git. Otherwise, // exportGit will fail to copy it to the Nix store. runProgram("git", true, - { "-C", refData->path, "add", + { "-C", refData->path, "add", "--intent-to-add", (flakeRef.subdir == "" ? 
"" : flakeRef.subdir + "/") + "flake.lock" }); } else throw Error("flakeUri %s can't be updated because it is not a path", flakeUri); From 0d1c2e5baed2d75c02daa66962bc7863976978fa Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 8 May 2019 13:26:05 +0200 Subject: [PATCH 114/634] Add Hydra flake to the registry --- flake-registry.json | 3 +++ 1 file changed, 3 insertions(+) diff --git a/flake-registry.json b/flake-registry.json index ae94f1df2..c6a549f92 100644 --- a/flake-registry.json +++ b/flake-registry.json @@ -8,6 +8,9 @@ }, "nixpkgs": { "uri": "github:edolstra/nixpkgs/release-19.03" + }, + "hydra": { + "uri": "github:NixOS/hydra/flake" } }, "version": 1 From 77e1f9010c5328bee16d85661fe359746cb70e40 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 8 May 2019 13:38:32 +0200 Subject: [PATCH 115/634] Export missing rev/shortRev attributes --- src/libexpr/primops/flake.cc | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index b451afba4..dc8df7f6f 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -441,6 +441,8 @@ void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) state.store->isValidPath(nonFlake.storePath); mkString(*state.allocAttr(*vNonFlake, state.sOutPath), nonFlake.storePath, {nonFlake.storePath}); + + // FIXME: add rev, shortRev, revCount, ... } mkString(*state.allocAttr(v, state.sDescription), resFlake.flake.description); @@ -449,6 +451,13 @@ void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) state.store->isValidPath(path); mkString(*state.allocAttr(v, state.sOutPath), path, {path}); + if (resFlake.flake.resolvedRef.rev) { + mkString(*state.allocAttr(v, state.symbols.create("rev")), + resFlake.flake.resolvedRef.rev->gitRev()); + mkString(*state.allocAttr(v, state.symbols.create("shortRev")), + resFlake.flake.resolvedRef.rev->gitShortRev()); + } + if (resFlake.flake.revCount) mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *resFlake.flake.revCount); From a887892eb607d45de47e77aeb1a756f7672395b6 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 8 May 2019 14:17:32 +0200 Subject: [PATCH 116/634] nix-shell: Don't fail if run from a path containing the string "nix-shell" --- src/nix-build/nix-build.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc index 618895d38..5b8e816d8 100755 --- a/src/nix-build/nix-build.cc +++ b/src/nix-build/nix-build.cc @@ -106,7 +106,7 @@ static void _main(int argc, char * * argv) // Heuristic to see if we're invoked as a shebang script, namely, // if we have at least one argument, it's the name of an // executable file, and it starts with "#!". 
- if (runEnv && argc > 1 && !std::regex_search(argv[1], std::regex("nix-shell"))) { + if (runEnv && argc > 1 && !std::regex_search(baseNameOf(argv[1]), std::regex("nix-shell"))) { script = argv[1]; try { auto lines = tokenizeString(readFile(script), "\n"); From cb5ebc5c1120da8900ef074ed300685cc3177ae6 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 8 May 2019 17:07:35 +0200 Subject: [PATCH 117/634] nix dev-shell: Keep $TERM --- src/nix/shell.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/nix/shell.cc b/src/nix/shell.cc index 0813d122c..8b9106171 100644 --- a/src/nix/shell.cc +++ b/src/nix/shell.cc @@ -133,6 +133,7 @@ struct Common : InstallableCommand "SHLVL", "TEMP", "TEMPDIR", + "TERM", "TMP", "TMPDIR", "TZ", From 455aa8d9ea55d3ea661b3c6f93e3ed5a43a82746 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 8 May 2019 18:20:35 +0200 Subject: [PATCH 118/634] Add newline at end of lockfile Suggested by @grahamc. --- flake.lock | 2 +- src/libexpr/primops/flake.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/flake.lock b/flake.lock index 9848cb996..0a5f76db7 100644 --- a/flake.lock +++ b/flake.lock @@ -6,4 +6,4 @@ } }, "version": 1 -} \ No newline at end of file +} diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 5e732b362..1d409105f 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -121,7 +121,7 @@ void writeLockFile(const LockFile & lockFile, const Path & path) for (auto & x : lockFile.flakeEntries) json["requires"][x.first.to_string()] = flakeEntryToJson(x.second); createDirs(dirOf(path)); - writeFile(path, json.dump(4)); // '4' = indentation in json file + writeFile(path, json.dump(4) + "\n"); // '4' = indentation in json file } std::shared_ptr getGlobalRegistry() From 8fc1c3f413c20b4519a41e38365aa71fa06a5aae Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 8 May 2019 22:08:38 +0200 Subject: [PATCH 119/634] Fix assertion failure in FlakeRef::to_string() Fixes $ nix build nix: src/libexpr/primops/flakeref.cc:169: std::__cxx11::string nix::FlakeRef::to_string() const: Assertion `!rev' failed. Aborted e.g. when flake.nix doesn't exist. Also use gitRev(). 
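For illustration (hypothetical values, following the path flake ref syntax described later in doc/flakes/design.md), a locked path flake ref is now expected to render roughly as

  /path/to/my/repo?ref=master&rev=e72daba8250068216d79d2aeef40d4d95aff6666

instead of tripping the assertion.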
--- src/libexpr/primops/flakeref.cc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index 784a0868e..3842b3f1a 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -161,13 +161,13 @@ std::string FlakeRef::to_string() const if (auto refData = std::get_if(&data)) { string = refData->alias; if (ref) string += '/' + *ref; - if (rev) string += '/' + rev->to_string(Base16, false); + if (rev) string += '/' + rev->gitRev(); } else if (auto refData = std::get_if(&data)) { assert(subdir == ""); - assert(!rev); - assert(!ref); + if (ref) addParam("ref", *ref); + if (rev) addParam("rev", rev->gitRev()); return refData->path; } @@ -175,7 +175,7 @@ std::string FlakeRef::to_string() const assert(!(ref && rev)); string = "github:" + refData->owner + "/" + refData->repo; if (ref) { string += '/'; string += *ref; } - if (rev) { string += '/'; string += rev->to_string(Base16, false); } + if (rev) { string += '/'; string += rev->gitRev(); } if (subdir != "") addParam("dir", subdir); } @@ -186,7 +186,7 @@ std::string FlakeRef::to_string() const if (ref) { addParam("ref", *ref); if (rev) - addParam("rev", rev->to_string(Base16, false)); + addParam("rev", rev->gitRev()); } if (subdir != "") addParam("dir", subdir); From cead210e664785b25d7b5819133b74b4cc2c392b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 8 May 2019 22:40:21 +0200 Subject: [PATCH 120/634] Add patchelf to the flake registry --- flake-registry.json | 3 +++ 1 file changed, 3 insertions(+) diff --git a/flake-registry.json b/flake-registry.json index c6a549f92..348bd010a 100644 --- a/flake-registry.json +++ b/flake-registry.json @@ -11,6 +11,9 @@ }, "hydra": { "uri": "github:NixOS/hydra/flake" + }, + "patchelf": { + "uri": "github:NixOS/patchelf" } }, "version": 1 From a746dc64d2fa45456817c6ef8a4f82c6217e0392 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 9 May 2019 13:55:33 +0200 Subject: [PATCH 121/634] Add currently failing test for #81 --- tests/flakes.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/flakes.sh b/tests/flakes.sh index 26ffd6a9f..da952d552 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -110,6 +110,10 @@ nix build -o $TEST_ROOT/result --flake-registry $registry $flake2:bar [[ -e $flake2/flake.lock ]] git -C $flake2 commit flake.lock -m 'Add flake.lock' +# Rerunning the build should not change the lockfile. +nix build -o $TEST_ROOT/result --flake-registry $registry $flake2:bar +[[ -z $(git -C $flake2 diff) ]] + # Now we should be able to build the flake in pure mode. nix build -o $TEST_ROOT/result --flake-registry $registry flake2:bar From 391e1f511d90461dcdd52e495e2596bbd2ea3c5a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 9 May 2019 13:59:50 +0200 Subject: [PATCH 122/634] Add test for indirect flake dependencies I.e. flake3 depends on flake2 which depends on flake1. Currently this fails with error: indirect flake reference 'flake1' is not allowed because we're not propagating lockfiles downwards properly. 
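A rough sketch of the chain under test (flake.nix contents abbreviated; the requires syntax follows doc/flakes/design.md):

  flake3/flake.nix:  requires = [ "flake2" ];
  flake2/flake.nix:  requires = [ "flake1" ];

Resolving flake3 therefore has to hand the lock file entries recorded for flake2 down to flake2's own resolution; because that propagation is missing, the indirect reference 'flake1' is rejected with the error above.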
--- tests/flakes.sh | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/tests/flakes.sh b/tests/flakes.sh index da952d552..7b827a418 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -59,6 +59,25 @@ EOF git -C $flake2 add flake.nix git -C $flake2 commit -m 'Initial' +cat > $flake3/flake.nix < $registry < Date: Wed, 1 May 2019 17:01:03 +0200 Subject: [PATCH 123/634] Fixed issue #47 Content hashes --- src/libexpr/primops/flake.cc | 31 +++++++++++++++++++++---------- src/libexpr/primops/flake.hh | 17 +++++++++++++---- 2 files changed, 34 insertions(+), 14 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 1d409105f..e9896d665 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -50,7 +50,8 @@ LockFile::FlakeEntry readFlakeEntry(nlohmann::json json) if (!flakeRef.isImmutable()) throw Error("cannot use mutable flake '%s' in pure mode", flakeRef); - LockFile::FlakeEntry entry(flakeRef); + Hash hash = Hash((std::string) json["contentHash"]); + LockFile::FlakeEntry entry(flakeRef, hash); auto nonFlakeRequires = json["nonFlakeRequires"]; @@ -58,7 +59,9 @@ LockFile::FlakeEntry readFlakeEntry(nlohmann::json json) FlakeRef flakeRef(i->value("uri", "")); if (!flakeRef.isImmutable()) throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef); - entry.nonFlakeEntries.insert_or_assign(i.key(), flakeRef); + Hash hash = Hash((std::string) i->value("contentHash", "")); + LockFile::NonFlakeEntry newEntry(flakeRef, hash); + entry.nonFlakeEntries.insert_or_assign(i.key(), newEntry); } auto requires = json["requires"]; @@ -86,9 +89,10 @@ LockFile readLockFile(const Path & path) for (auto i = nonFlakeRequires.begin(); i != nonFlakeRequires.end(); ++i) { FlakeRef flakeRef(i->value("uri", "")); + LockFile::NonFlakeEntry entry(flakeRef, Hash((std::string) json["contentHash"])); if (!flakeRef.isImmutable()) throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef); - lockFile.nonFlakeEntries.insert_or_assign(i.key(), flakeRef); + lockFile.nonFlakeEntries.insert_or_assign(i.key(), entry); } auto requires = json["requires"]; @@ -103,8 +107,11 @@ nlohmann::json flakeEntryToJson(const LockFile::FlakeEntry & entry) { nlohmann::json json; json["uri"] = entry.ref.to_string(); - for (auto & x : entry.nonFlakeEntries) - json["nonFlakeRequires"][x.first]["uri"] = x.second.to_string(); + json["contentHash"] = entry.contentHash.to_string(SRI); + for (auto & x : entry.nonFlakeEntries) { + json["nonFlakeRequires"][x.first]["uri"] = x.second.ref.to_string(); + json["nonFlakeRequires"][x.first]["contentHash"] = x.second.contentHash.to_string(SRI); + } for (auto & x : entry.flakeEntries) json["requires"][x.first.to_string()] = flakeEntryToJson(x.second); return json; @@ -115,8 +122,10 @@ void writeLockFile(const LockFile & lockFile, const Path & path) nlohmann::json json; json["version"] = 1; json["nonFlakeRequires"] = nlohmann::json::object(); - for (auto & x : lockFile.nonFlakeEntries) - json["nonFlakeRequires"][x.first]["uri"] = x.second.to_string(); + for (auto & x : lockFile.nonFlakeEntries) { + json["nonFlakeRequires"][x.first]["uri"] = x.second.ref.to_string(); + json["nonFlakeRequires"][x.first]["contentHash"] = x.second.contentHash.to_string(SRI); + } json["requires"] = nlohmann::json::object(); for (auto & x : lockFile.flakeEntries) json["requires"][x.first.to_string()] = flakeEntryToJson(x.second); @@ -293,6 +302,7 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool 
impureIsAllowe throw Error("flake file '%s' escapes from '%s'", resolvedRef, sourceInfo.storePath); Flake flake(flakeRef, sourceInfo); + flake.hash = state.store->queryPathInfo(sourceInfo.storePath)->narHash; if (!pathExists(flakeFile)) throw Error("source tree referenced by '%s' does not contain a '%s/flake.nix' file", resolvedRef, resolvedRef.subdir); @@ -346,12 +356,13 @@ NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeAlias al NonFlake nonFlake(flakeRef, sourceInfo); - nonFlake.storePath = sourceInfo.storePath; state.store->assertStorePath(nonFlake.storePath); if (state.allowedPaths) state.allowedPaths->insert(nonFlake.storePath); + nonFlake.hash = state.store->queryPathInfo(sourceInfo.storePath)->narHash; + nonFlake.alias = alias; return nonFlake; @@ -388,13 +399,13 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, LockFile::FlakeEntry dependenciesToFlakeEntry(const ResolvedFlake & resolvedFlake) { - LockFile::FlakeEntry entry(resolvedFlake.flake.resolvedRef); + LockFile::FlakeEntry entry(resolvedFlake.flake.resolvedRef, resolvedFlake.flake.hash); for (auto & newResFlake : resolvedFlake.flakeDeps) entry.flakeEntries.insert_or_assign(newResFlake.flake.originalRef, dependenciesToFlakeEntry(newResFlake)); for (auto & nonFlake : resolvedFlake.nonFlakeDeps) - entry.nonFlakeEntries.insert_or_assign(nonFlake.alias, nonFlake.resolvedRef); + entry.nonFlakeEntries.insert_or_assign(nonFlake.alias, LockFile::NonFlakeEntry(nonFlake.resolvedRef, nonFlake.hash)); return entry; } diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index f3bf76cc7..691f68b0a 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -19,16 +19,24 @@ struct FlakeRegistry struct LockFile { + struct NonFlakeEntry + { + FlakeRef ref; + Hash contentHash; + NonFlakeEntry(const FlakeRef & flakeRef, const Hash & hash) : ref(flakeRef), contentHash(hash) {}; + }; + struct FlakeEntry { FlakeRef ref; + Hash contentHash; std::map flakeEntries; - std::map nonFlakeEntries; - FlakeEntry(const FlakeRef & flakeRef) : ref(flakeRef) {}; + std::map nonFlakeEntries; + FlakeEntry(const FlakeRef & flakeRef, const Hash & hash) : ref(flakeRef), contentHash(hash) {}; }; std::map flakeEntries; - std::map nonFlakeEntries; + std::map nonFlakeEntries; }; typedef std::vector> Registries; @@ -60,6 +68,7 @@ struct Flake std::string description; std::optional revCount; Path storePath; + Hash hash; // content hash std::vector requires; std::map nonFlakeRequires; Value * vProvides; // FIXME: gc @@ -76,8 +85,8 @@ struct NonFlake FlakeRef resolvedRef; std::optional revCount; Path storePath; + Hash hash; // content hash // date - // content hash NonFlake(const FlakeRef & origRef, const SourceInfo & sourceInfo) : originalRef(origRef), resolvedRef(sourceInfo.resolvedRef), revCount(sourceInfo.revCount), storePath(sourceInfo.storePath) {}; }; From 68b17ef731570db1d048e26a9a9ab8b8c3f09e0c Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 9 May 2019 15:08:09 +0200 Subject: [PATCH 124/634] Update flake.lock --- flake.lock | 1 + 1 file changed, 1 insertion(+) diff --git a/flake.lock b/flake.lock index 0a5f76db7..965d038ce 100644 --- a/flake.lock +++ b/flake.lock @@ -2,6 +2,7 @@ "nonFlakeRequires": {}, "requires": { "nixpkgs": { + "contentHash": "sha256-vy2UmXQM66aS/Kn2tCtjt9RwxfBvV+nQVb5tJQFwi8E=", "uri": "github:edolstra/nixpkgs/a4d896e89932e873c4117908d558db6210fa3b56" } }, From 5a80cccc7075de151c79a818bad758e217cf1e9f Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: 
Sat, 11 May 2019 01:48:33 +0200 Subject: [PATCH 125/634] Hack to fix tests.evalNixpkgs --- release.nix | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/release.nix b/release.nix index a47ca862f..f98e6d6ed 100644 --- a/release.nix +++ b/release.nix @@ -268,7 +268,8 @@ let tests.evalNixpkgs = import (nixpkgs + "/pkgs/top-level/make-tarball.nix") { - inherit nixpkgs; + # FIXME: fix pkgs/top-level/make-tarball.nix in NixOS to not require a revCount. + nixpkgs = nixpkgs // { revCount = 0; }; inherit pkgs; nix = build.x86_64-linux; officialRelease = false; From 156e3a9daacce48f2f53939dee92dd063dfc507a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Sat, 11 May 2019 01:50:28 +0200 Subject: [PATCH 126/634] nix dev-shell: Ignore SSL_CERT_FILE --- src/nix/shell.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/nix/shell.cc b/src/nix/shell.cc index 8b9106171..2ccad930f 100644 --- a/src/nix/shell.cc +++ b/src/nix/shell.cc @@ -131,6 +131,7 @@ struct Common : InstallableCommand "PWD", "SHELLOPTS", "SHLVL", + "SSL_CERT_FILE", // FIXME: only want to ignore /no-cert-file.crt "TEMP", "TEMPDIR", "TERM", From 4c9ebd20d7d16501aaceecf584c15499f720bf65 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Wed, 15 May 2019 08:04:21 +0200 Subject: [PATCH 127/634] One FIXME was already fixed --- src/libexpr/primops/flake.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index e9896d665..8dd9599bd 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -312,7 +312,6 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe state.forceAttrs(vInfo); - // FIXME: change to "id"? if (auto name = vInfo.attrs->get(state.sName)) flake.id = state.forceStringNoCtx(*(**name).value, *(**name).pos); else From 696a98af5aa0f34b35fd2bbb714842495104ed13 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 15 May 2019 09:02:48 +0200 Subject: [PATCH 128/634] Disable tests introduced by PR #82 This PR was not intended to be merged until those tests were actually passing. So disable them for now to unbreak the flakes branch. https://hydra.nixos.org/eval/1519271 --- tests/flakes.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/flakes.sh b/tests/flakes.sh index 7b827a418..40ba42715 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -130,8 +130,8 @@ nix build -o $TEST_ROOT/result --flake-registry $registry $flake2:bar git -C $flake2 commit flake.lock -m 'Add flake.lock' # Rerunning the build should not change the lockfile. -nix build -o $TEST_ROOT/result --flake-registry $registry $flake2:bar -[[ -z $(git -C $flake2 diff) ]] +#nix build -o $TEST_ROOT/result --flake-registry $registry $flake2:bar +#[[ -z $(git -C $flake2 diff) ]] # Now we should be able to build the flake in pure mode. nix build -o $TEST_ROOT/result --flake-registry $registry flake2:bar @@ -140,4 +140,4 @@ nix build -o $TEST_ROOT/result --flake-registry $registry flake2:bar nix build -o $TEST_ROOT/result file://$flake2:bar # Test whether indirect dependencies work. -nix build -o $TEST_ROOT/result --flake-registry $registry $flake3:xyzzy +#nix build -o $TEST_ROOT/result --flake-registry $registry $flake3:xyzzy From 5c34d665386f4053d666b0899ecca0639e500fbd Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 15 May 2019 15:38:24 +0200 Subject: [PATCH 129/634] Make flakes work with 'nix build --store ...' It was getting confused between logical and real store paths. 
Also, make fetchGit and fetchMercurial update allowedPaths properly. (Maybe the evaluator, rather than the caller of the evaluator, should apply toRealPath(), but that's a bigger change.) --- src/libexpr/primops/fetchGit.cc | 2 +- src/libexpr/primops/fetchMercurial.cc | 2 +- src/libexpr/primops/flake.cc | 13 +++++++------ src/libstore/download.cc | 2 ++ src/libstore/download.hh | 3 +++ 5 files changed, 14 insertions(+), 8 deletions(-) diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index e79eacafe..00bbeb6d8 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -259,7 +259,7 @@ static void prim_fetchGit(EvalState & state, const Pos & pos, Value * * args, Va v.attrs->sort(); if (state.allowedPaths) - state.allowedPaths->insert(gitInfo.storePath); + state.allowedPaths->insert(state.store->toRealPath(gitInfo.storePath)); } static RegisterPrimOp r("fetchGit", 1, prim_fetchGit); diff --git a/src/libexpr/primops/fetchMercurial.cc b/src/libexpr/primops/fetchMercurial.cc index cfe1bd871..596047ce3 100644 --- a/src/libexpr/primops/fetchMercurial.cc +++ b/src/libexpr/primops/fetchMercurial.cc @@ -214,7 +214,7 @@ static void prim_fetchMercurial(EvalState & state, const Pos & pos, Value * * ar v.attrs->sort(); if (state.allowedPaths) - state.allowedPaths->insert(hgInfo.storePath); + state.allowedPaths->insert(state.store->toRealPath(hgInfo.storePath)); } static RegisterPrimOp r("fetchMercurial", 1, prim_fetchMercurial); diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 8dd9599bd..c73487585 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -248,7 +248,7 @@ static SourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, bool FlakeRef ref(resolvedRef.baseRef()); ref.rev = Hash(std::string(*result.etag, 1, result.etag->size() - 2), htSHA1); SourceInfo info(ref); - info.storePath = result.path; + info.storePath = result.storePath; return info; } @@ -294,21 +294,22 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe state.store->assertStorePath(sourceInfo.storePath); if (state.allowedPaths) - state.allowedPaths->insert(sourceInfo.storePath); + state.allowedPaths->insert(state.store->toRealPath(sourceInfo.storePath)); // Guard against symlink attacks. 
Path flakeFile = canonPath(sourceInfo.storePath + "/" + resolvedRef.subdir + "/flake.nix"); - if (!isInDir(flakeFile, sourceInfo.storePath)) - throw Error("flake file '%s' escapes from '%s'", resolvedRef, sourceInfo.storePath); + Path realFlakeFile = state.store->toRealPath(flakeFile); + if (!isInDir(realFlakeFile, state.store->toRealPath(sourceInfo.storePath))) + throw Error("'flake.nix' file of flake '%s' escapes from '%s'", resolvedRef, sourceInfo.storePath); Flake flake(flakeRef, sourceInfo); flake.hash = state.store->queryPathInfo(sourceInfo.storePath)->narHash; - if (!pathExists(flakeFile)) + if (!pathExists(realFlakeFile)) throw Error("source tree referenced by '%s' does not contain a '%s/flake.nix' file", resolvedRef, resolvedRef.subdir); Value vInfo; - state.evalFile(flakeFile, vInfo); // FIXME: symlink attack + state.evalFile(realFlakeFile, vInfo); // FIXME: symlink attack state.forceAttrs(vInfo); diff --git a/src/libstore/download.cc b/src/libstore/download.cc index cb77cdc77..975cfd97d 100644 --- a/src/libstore/download.cc +++ b/src/libstore/download.cc @@ -804,6 +804,7 @@ CachedDownloadResult Downloader::downloadCached(ref store, const string & expectedStorePath = store->makeFixedOutputPath(unpack, expectedHash, name); if (store->isValidPath(expectedStorePath)) { CachedDownloadResult result; + result.storePath = expectedStorePath; result.path = store->toRealPath(expectedStorePath); return result; } @@ -912,6 +913,7 @@ CachedDownloadResult Downloader::downloadCached(ref store, const string & url, expectedHash.to_string(), gotHash.to_string()); } + result.storePath = storePath; result.path = store->toRealPath(storePath); return result; } diff --git a/src/libstore/download.hh b/src/libstore/download.hh index 8acfe4e1a..aa8c34be2 100644 --- a/src/libstore/download.hh +++ b/src/libstore/download.hh @@ -43,6 +43,9 @@ struct DownloadResult struct CachedDownloadResult { + // Note: 'storePath' may be different from 'path' when using a + // chroot store. + Path storePath; Path path; std::optional etag; }; From b0fc5bcee9f74c717d8ca564c193a5ad7846e5c7 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 15 May 2019 20:58:47 +0200 Subject: [PATCH 130/634] Start of flake documentation Imported from https://gist.github.com/edolstra/40da6e3a4d4ee8fd019395365e0772e7. The goal is to turn this into an RFC eventually and later to integrate it into the manual. --- doc/flakes/design.md | 566 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 566 insertions(+) create mode 100644 doc/flakes/design.md diff --git a/doc/flakes/design.md b/doc/flakes/design.md new file mode 100644 index 000000000..ebc229b09 --- /dev/null +++ b/doc/flakes/design.md @@ -0,0 +1,566 @@ +# Nix Flake MVP + +## Goals + +* To provide Nix repositories with an easy and standard way to + reference other Nix repositories. + +* To allow such references to be queried and updated automatically. + +* To provide a replacement for `nix-channel`, `NIX_PATH` and Hydra + jobset definitions. + +* To enable reproducible, hermetic evaluation of packages and NixOS + configurations. + +Things that we probably won't do in the initial iteration: + +* Sophisticated flake versioning, such as the ability to specify + version ranges on dependencies. + +* A way to specify the types of values provided by a flake. For the + most part, flakes can provide arbitrary Nix values, but there will + be some standard attribute names (e.g. `packages` must be a set of + installable derivations). 
+ + +## Overview + +* A flake is (usually) a Git repository that contains a file named + `flake.nix` at top-level. + +* Flakes *provide* an attribute set of values, such as packages, + Nixpkgs overlays, NixOS modules, library functions, Hydra jobs, + `nix-shell` definitions, etc. + +* Flakes can *depend* on other flakes. + +* Flakes are referred to using a *flake reference*, which is either a + URL specifying its repository's location + (e.g. `github:NixOS/nixpkgs/release-18.09`) or an identifier + (e.g. `nixpkgs`) looked up in a *lock file* or *flake + registry*. They can also specify revisions, + e.g. `github:NixOS/nixpkgs/98a2a5b5370c1e2092d09cb38b9dcff6d98a109f`. + +* The *flake registry* is a centrally maintained mapping (on + `nixos.org`) from flake identifiers to flake locations + (e.g. `nixpkgs -> github:NixOS/nixpkgs/release-18.09`). + +* A flake can contain a *lock file* (`flake.lock`) used when resolving + the dependencies in `flake.nix`. It maps flake references to + references containing revisions (e.g. `nixpkgs -> + github:NixOS/nixpkgs/98a2a5b5370c1e2092d09cb38b9dcff6d98a109f`). + +* The `nix` command uses the flake registry as its default + installation source. For example, `nix build nixpkgs.hello` builds the + `hello` package provided by the `nixpkgs` flake listed in the + registry. `nix` will automatically download/upload the registry and + flakes as needed. + +* `nix build` without arguments will build the flake in the current + directory (or some parent). + +* The command `nix flake update` generates/updates `flake.lock` from + `flake.nix`. This should probably also be done automatically when + building from a local flake. + +* `nixos-rebuild` will build a configuration from a (locked) + flake. Evaluation will be done in pure mode to ensure there are no + unaccounted inputs. Thus the NixOS configuration can be reproduced + unambiguously from the top-level flake. + +* Nix code can query flake metadata such as `commitHash` (the Git + revision) or `date` (the date of the last commit). This is useful + for NixOS to compute the NixOS version string (which will be the + revision of the top-level configuration flake, uniquely identifying + the configuration). + +* Hydra jobset configurations will consist of a single flake + reference. Thus we can get rid of jobset inputs; any other needed + repositories can be fetched by the top-level flake. The top-level + flake can be locked or unlocked; if some dependencies are unlocked, + then Nix will fetch the latest revision for each. + + +## Example flake + +A flake is a Git repository that contains a file named +`flake.nix`. For example, here is the `flake.nix` for `dwarffs`, a +small repository that provides a single package and a single NixOS +module. + +```nix +{ + # The flake identifier. + name = "dwarffs"; + + # The epoch may be used in the future to determine how Nix + # expressions inside this flake are to be parsed. + epoch = 2018; + + # Some other metadata. + description = "A filesystem that fetches DWARF debug info from the Internet on demand"; + + # A list of flake references denoting the flakes that this flake + # depends on. Nix will resolve and fetch these flakes and pass them + # as a function argument to `provides` below. + # + # `flake:nixpkgs` denotes a flake named `nixpkgs` which is looked up + # in the flake registry, or in `flake.lock` inside this flake, if it + # exists. + requires = [ flake:nixpkgs ]; + + # The stuff provided by this flake. 
Flakes can provide whatever they
+  # want (convention over configuration), but some attributes have
+  # special meaning to tools / other flakes: for example, `packages`
+  # is used by the `nix` CLI to search for packages, and
+  # `nixosModules` is used by NixOS to automatically pull in the
+  # modules provided by a flake.
+  #
+  # `provides` takes a single argument named `deps` that contains
+  # the resolved set of flakes. (See below.)
+  provides = deps: {
+
+    # This is searched by `nix`, so something like `nix install
+    # dwarffs.dwarffs` resolves to this `packages.dwarffs`.
+    packages.dwarffs =
+      with deps.nixpkgs.packages;
+      with deps.nixpkgs.builders;
+      with deps.nixpkgs.lib;
+
+      stdenv.mkDerivation {
+        name = "dwarffs-0.1";
+
+        buildInputs = [ fuse nix nlohmann_json boost ];
+
+        NIX_CFLAGS_COMPILE = "-I ${nix.dev}/include/nix -include ${nix.dev}/include/nix/config.h -D_FILE_OFFSET_BITS=64";
+
+        src = cleanSource ./.;
+
+        installPhase =
+          ''
+            mkdir -p $out/bin $out/lib/systemd/system
+
+            cp dwarffs $out/bin/
+            ln -s dwarffs $out/bin/mount.fuse.dwarffs
+
+            cp ${./run-dwarffs.mount} $out/lib/systemd/system/run-dwarffs.mount
+            cp ${./run-dwarffs.automount} $out/lib/systemd/system/run-dwarffs.automount
+          '';
+      };
+
+    # NixOS modules.
+    nixosModules.dwarffs = import ./module.nix deps;
+
+    # Provide a single Hydra job (`hydraJobs.dwarffs`).
+    hydraJobs = deps.this.packages;
+  };
+}
+```
+
+Similarly, a minimal `flake.nix` for Nixpkgs:
+
+```nix
+{
+  name = "nixpkgs";
+
+  epoch = 2018;
+
+  description = "A collection of packages for the Nix package manager";
+
+  provides = deps:
+    let pkgs = import ./. {}; in
+    {
+      lib = import ./lib;
+
+      builders = {
+        inherit (pkgs) stdenv fetchurl;
+      };
+
+      packages = {
+        inherit (pkgs) hello nix fuse nlohmann_json boost;
+      };
+    };
+}
+```
+Note that `packages` is an unpolluted set of packages: non-package
+values like `lib` or `fetchurl` are not part of it.
+
+
+## Flake identifiers
+
+A flake has an identifier (e.g. `nixpkgs` or `dwarffs`).
+
+
+## Flake references
+
+Flake references are a URI-like syntax to specify the physical
+location of a flake (e.g. a Git repository) or to denote a lookup in
+the flake registry or lock file.
+
+* `(flake:)?<flake-id>(/rev-or-ref(/rev)?)?`
+
+  Look up a flake by ID in the flake lock file or in the flake
+  registry. These must specify an actual location for the flake using
+  the formats listed below. Note that in pure evaluation mode, the
+  flake registry is empty.
+
+  Optionally, the `rev` or `ref` from the dereferenced flake can be
+  overridden. For example,
+
+  > nixpkgs/19.09
+
+  uses the `19.09` branch of the `nixpkgs` flake's GitHub repository,
+  while
+
+  > nixpkgs/98a2a5b5370c1e2092d09cb38b9dcff6d98a109f
+
+  uses the specified revision. For Git (rather than GitHub)
+  repositories, both the rev and ref must be given, e.g.
+
+  > nixpkgs/19.09/98a2a5b5370c1e2092d09cb38b9dcff6d98a109f
+
+* `github:<owner>/<repo>(/<rev-or-ref>)?`
+
+  A repository on GitHub. These differ from Git references in that
+  they're downloaded in an efficient way (via the tarball mechanism)
+  and that they support downloading a specific revision without
+  specifying a branch. `rev-or-ref` is either a commit hash (`rev`)
+  or a branch or tag name (`ref`). The default is `master` if none is
+  specified. Note that in pure evaluation mode, a commit hash must be
+  used.
+
+  Flakes fetched in this manner expose `rev` and `date` attributes,
+  but not `revCount`.
+
+  Examples:
+
+  > github:edolstra/dwarffs
+
+  > github:edolstra/dwarffs/unstable
+
+  > github:edolstra/dwarffs/41c0c1bf292ea3ac3858ff393b49ca1123dbd553
+
+* > https://<server>/<path>.git(\?attr(&attr)*)?
+
+  > ssh://<server>/<path>.git(\?attr(&attr)*)?
+
+  > git://<server>/<path>.git(\?attr(&attr)*)?
+
+  > file:///<path>(\?attr(&attr)*)?
+
+  where `attr` is one of `rev=<rev>` or `ref=<ref>`.
+
+  A Git repository fetched through https. Note that the path must end
+  in `.git`. The default for `ref` is `master`.
+
+  Examples:
+
+  > https://example.org/my/repo.git
+  > https://example.org/my/repo.git?ref=release-1.2.3
+  > https://example.org/my/repo.git?rev=e72daba8250068216d79d2aeef40d4d95aff6666
+
+* > /path.git(\?attr(&attr)*)?
+
+  Like `file://path.git`, but if no `ref` or `rev` is specified, the
+  (possibly dirty) working tree will be used. Using a working tree is
+  not allowed in pure evaluation mode.
+
+  Examples:
+
+  > /path/to/my/repo
+
+  > /path/to/my/repo?ref=develop
+
+  > /path/to/my/repo?rev=e72daba8250068216d79d2aeef40d4d95aff6666
+
+* > https://<server>/<path>.tar.xz(?hash=<sri-hash>)
+
+  > file:///<path>.tar.xz(?hash=<sri-hash>)
+
+  A flake distributed as a tarball. In pure evaluation mode, an SRI
+  hash is mandatory. It exposes a `date` attribute, being the
+  timestamp of the most recently modified file inside the tarball.
+
+  Example:
+
+  > https://releases.nixos.org/nixos/unstable/nixos-19.03pre167858.f2a1a4e93be/nixexprs.tar.xz
+
+  > https://releases.nixos.org/nixos/unstable/nixos-19.03pre167858.f2a1a4e93be/nixexprs.tar.xz?hash=sha256-56bbc099995ea8581ead78f22832fee7dbcb0a0b6319293d8c2d0aef5379397c
+
+Note: currently, there can be only one flake per Git repository, and
+it must be at top-level. In the future, we may want to add a field
+(e.g. `dir=`) to specify a subdirectory inside the repository.
+
+
+## Flake lock files
+
+This is a JSON file named `flake.lock` that maps flake identifiers
+used in the corresponding `flake.nix` to "immutable" flake references;
+that is, flake references that contain a revision (for Git
+repositories) or a content hash (for tarballs).
+
+Example:
+
+```json
+{
+  "nixpkgs": "github:NixOS/nixpkgs/41c0c1bf292ea3ac3858ff393b49ca1123dbd553",
+  "foo": "https://example.org/foo.tar.xz?hash=sha256-56bbc099995ea8581ead78f22832fee7dbcb0a0b6319293d8c2d0aef5379397c"
+}
+```
+
+
+## `provides`
+
+The flake attribute `provides` is a function that takes an argument
+named `deps` and returns a (mostly) arbitrary attrset of values. Some
+of the standard result attributes:
+
+* `packages`: A set of installable derivations used by the `nix`
+  command. That is, commands such as `nix install` ignore all other
+  flake attributes.
+
+* `hydraJobs`: Used by Hydra.
+
+* `nixosModules`: An attrset of NixOS modules.
+
+* `nixosSystems`: An attrset of calls to `evalModules`, i.e. things
+  that `nixos-rebuild` can switch to. (Maybe this is superfluous, but
+  we need to avoid a situation where `nixos-rebuild` needs to fetch
+  its own `nixpkgs` just to do `evalModules`.)
+
+* `shell`: A specification of a development environment in some TBD
+  format.
+
+The function argument `deps` is an attrset that contains an
+attribute for each dependency specified in `requires`. (Should it
+contain transitive dependencies? Probably not.) Each attribute is an
+attrset containing the `provides` of the dependency, in addition to
+the following attributes:
+
+* `path`: The path to the flake's source code. Useful when you want to
+  use non-Nix artifacts from the flake, or if you want to *store* the
+  source code of the dependency in a derivation. (For example, we
+  could store the sources of all flake dependencies in a NixOS system
+  configuration, as a generalization of
+  `system.copySystemConfiguration`.)
+
+* `meta`: An attrset containing the following:
+
+  * `description`
+
+  * `commitHash` (or `rev`?) (not for tarball flakes): The Git commit
+    hash.
+
+  * `date`: The timestamp of the most recent commit (for Git
+    repositories), or the timestamp of the most recently modified file
+    (for tarballs).
+
+  * `revCount` (for Git flakes, but not GitHub flakes): The number of
+    ancestors of the revision. Useful for generating version strings.
+
+
+## Non-flake dependencies
+
+It may be useful to pull in repositories that are not flakes
+(i.e. don't contain a `flake.nix`). This could be done in two ways:
+
+* Allow flakes not to have a `flake.nix` file, in which case it's a
+  flake with no requires and no provides. The downside of this
+  approach is that we can't detect accidental use of a non-flake
+  repository. (Also, we need to conjure up an identifier somehow.)
+
+* Add a flake attribute to specify non-flake dependencies, e.g.
+
+  > nonFlakeRequires.foobar = github:foo/bar;
+
+
+## Flake registry
+
+The flake registry maps flake IDs to flake references (where the
+latter cannot be another indirection, i.e. it must not be a
+`flake:<flake-id>` reference).
+
+The default registry is kept at
+`https://nixos.org/flake-registry.json`. It looks like this:
+
+```json
+{
+  "version": 1,
+  "flakes": {
+    "dwarffs": {
+      "uri": "github:edolstra/dwarffs/flake"
+    },
+    "nixpkgs": {
+      "uri": "github:NixOS/nixpkgs/release-18.09"
+    }
+  }
+}
+```
+
+Nix automatically (re)downloads the registry. The downloaded file is a
+GC root so the registry remains available if nixos.org is unreachable.
+TBD: when to redownload?
+
+
+## Nix UI
+
+Commands for registry / user flake configuration:
+
+* `nix flake list`: Show all flakes in the registry.
+
+* `nix flake add <flake-ref>`: Add or override a flake to/in the
+  user's flake configuration (`~/.config/nix/flakes.nix`). For
+  example, `nix flake add nixpkgs/nixos-18.03` overrides the `nixpkgs`
+  flake to use the `nixos-18.03` branch. There should also be a way to
+  add multiple branches/revisions of the same flake by giving them a
+  different ID (e.g. `nix flake add --id nixpkgs-ancient
+  nixpkgs/nixos-16.03`).
+
+* `nix flake remove <flake-id>`: Remove a flake from the user's flake
+  configuration. Any flake with the same ID in the registry remains
+  available.
+
+* `nix flake lock <flake-id>`: Lock a flake. For example, `nix flake
+  lock nixpkgs` pins `nixpkgs` to the current revision.
+
+Commands for creating/modifying a flake:
+
+* `nix flake init`: Create a `flake.nix` in the current directory.
+
+* `nix flake update`: Update the lock file for the `flake.nix` in the
+  current directory. In most cases, this should be done
+  automatically. (E.g. `nix build` should automatically update the
+  lock file if a new dependency is added to `flake.nix`.)
+
+* `nix flake check`: Do some checks on the flake, e.g. check that all
+  `packages` are really packages.
+
+* `nix flake clone`: Do a Git clone of the flake repository. This is a
+  convenience to easily start hacking on a flake. E.g. `nix flake
+  clone dwarffs` clones the `dwarffs` GitHub repository to `./dwarffs`.
+
+TODO: maybe the first set of commands should have a different name
+from the second set.
+
+Flags / configuration options:
+
+* `--flakes (<flake-id>=<flake-ref>)*`: add/override some flakes.
+
+* (In `nix`) `--flake <flake-ref>`: set the specified flake as the
+  installation source. E.g. `nix build --flake ./my-nixpkgs hello`.
+
+The default installation source in `nix` is the `packages` from all
+flakes in the registry, that is:
+```
+builtins.mapAttrs (flakeName: flakeInfo:
+  (getFlake flakeInfo.uri).${flakeName}.provides.packages or {})
+  builtins.flakeRegistry
+```
+(where `builtins.flakeRegistry` is the global registry with user
+overrides applied, and `builtins.getFlake` downloads a flake and
+resolves its dependencies.)
+
+It may be nice to extend the default installation source with the
+`packages` from the flake in the current directory, so that
+
+> nix build hello
+
+does something similar to the old
+
+> nix-build -A hello
+
+Specifically, it builds `packages.hello` from the flake in the current
+directory. Of course, this creates some ambiguity if there is a flake
+in the registry named `hello`.
+
+Maybe the command
+
+> nix shell
+
+should do something like use `provides.shell` to initialize the shell,
+but probably we should ditch `nix shell` / `nix-shell` for direnv.
+
+
+## Pure evaluation and caching
+
+Flake evaluation should be done in pure mode. Thus:
+
+* Flakes cannot do `NIX_PATH` lookups via the `<...>` syntax.
+
+* They can't read random stuff from non-flake directories, such as
+  `~/.nix/config.nix` or overlays.
+
+This enables aggressive caching or precomputation of Nixpkgs package
+sets. For example, for a particular Nixpkgs flake closure (as
+identified by, say, a hash of the fully-qualified flake references
+after dependency resolution) and system type, an attribute like
+`packages.hello` should always evaluate to the same derivation. So we
+can:
+
+* Keep a local evaluation cache (say `~/.cache/nix/eval.sqlite`)
+  mapping `() -> (,
+  , )`.
+
+* Download a precomputed cache
+  (e.g. `https://releases.nixos.org/eval/.sqlite`). So
+  a command like `nix search` could avoid evaluating Nixpkgs entirely.
+
+Of course, this doesn't allow overlays. With pure evaluation, the only
+way to have these is to define a top-level flake that depends on the
+Nixpkgs flake and somehow passes in a set of overlays.
+
+TODO: in pure mode we have to pass the system type explicitly!
+
+
+## Hydra jobset dependencies
+
+Hydra can use the flake dependency resolution mechanism to fetch
+dependencies. This allows us to get rid of jobset configuration in the
+web interface: a jobset only requires a flake reference. That is, *a
+jobset is a flake*. Hydra then just builds the `hydraJobs` attrset
+`provide`d by the flake. (If omitted, maybe it can build `packages`.)
+
+
+## NixOS system configuration
+
+NixOS currently contains a lot of modules that really should be moved
+into their own repositories. For example, it contains a Hydra module
+that duplicates the one in the Hydra repository. Also, we want
+reproducible evaluation for NixOS system configurations. So NixOS
+system configurations should be stored as flakes in (local) Git
+repositories.
+
+`my-system/flake.nix`:
+
+```nix
+{
+  provides = flakes: {
+    nixosSystems.default =
+      flakes.nixpkgs.lib.evalModules {
+        modules =
+          [ { networking.firewall.enable = true;
+              hydra.useSubstitutes = true;
+            }
+            # The latter could be extracted automatically from `flakes`.
+            flakes.dwarffs.nixosModules.dwarffs
+            flakes.hydra.nixosModules.hydra
+          ];
+      };
+  };
+
+  requires =
+    [ "nixpkgs/nixos-18.09"
+      "dwarffs"
+      "hydra"
+      ... lots of other module flakes ...
+ ]; +} +``` + +We can then build the system: +``` +nixos-rebuild switch --flake ~/my-system +``` +This performs dependency resolution starting at `~/my-system/flake.nix` +and builds the `system` attribute in `nixosSystems.default`. From d9ad3723d59d9df2fb3c89335b5d9239f1860ec9 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Wed, 1 May 2019 11:38:48 +0200 Subject: [PATCH 131/634] Fixed issue 65 lockfile updating --- src/libexpr/primops/flake.cc | 91 +++++++++++++++++++++--------------- src/libexpr/primops/flake.hh | 6 +-- src/nix/command.hh | 4 +- src/nix/flake.cc | 9 ++-- src/nix/installables.cc | 16 +++---- tests/flakes.sh | 39 ++++++++-------- 6 files changed, 90 insertions(+), 75 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index c73487585..88eadff55 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -289,8 +289,6 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe FlakeRef resolvedRef = sourceInfo.resolvedRef; - resolvedRef = sourceInfo.resolvedRef; // `resolvedRef` is now immutable - state.store->assertStorePath(sourceInfo.storePath); if (state.allowedPaths) @@ -368,35 +366,59 @@ NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeAlias al return nonFlake; } -/* Given a flake reference, recursively fetch it and its - dependencies. - FIXME: this should return a graph of flakes. -*/ -ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, - RegistryAccess registryAccess, bool isTopFlake) +LockFile entryToLockFile(const LockFile::FlakeEntry & entry) +{ + LockFile lockFile; + lockFile.flakeEntries = entry.flakeEntries; + lockFile.nonFlakeEntries = entry.nonFlakeEntries; + return lockFile; +} + +ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flakeRef, RegistryAccess registryAccess, + LockFile lockFile, bool isTopFlake = false) { bool allowRegistries = registryAccess == AllowRegistry || (registryAccess == AllowRegistryAtTop && isTopFlake); - Flake flake = getFlake(state, topRef, allowRegistries); - LockFile lockFile; - - if (isTopFlake) - lockFile = readLockFile(flake.storePath + flake.resolvedRef.subdir + "/flake.lock"); // FIXME: symlink attack + Flake flake = getFlake(state, flakeRef, allowRegistries); ResolvedFlake deps(flake); - for (auto & nonFlakeInfo : flake.nonFlakeRequires) - deps.nonFlakeDeps.push_back(getNonFlake(state, nonFlakeInfo.second, nonFlakeInfo.first)); + for (auto & nonFlakeInfo : flake.nonFlakeRequires) { + FlakeRef ref = nonFlakeInfo.second; + auto i = lockFile.nonFlakeEntries.find(nonFlakeInfo.first); + if (i != lockFile.nonFlakeEntries.end()) ref = i->second.ref; + deps.nonFlakeDeps.push_back(getNonFlake(state, ref, nonFlakeInfo.first)); + } for (auto newFlakeRef : flake.requires) { + FlakeRef ref = newFlakeRef; + LockFile newLockFile; auto i = lockFile.flakeEntries.find(newFlakeRef); - if (i != lockFile.flakeEntries.end()) newFlakeRef = i->second.ref; - // FIXME: propagate lockFile downwards - deps.flakeDeps.push_back(resolveFlake(state, newFlakeRef, registryAccess, false)); + if (i != lockFile.flakeEntries.end()) { // Propagate lockFile downwards if possible + ref = i->second.ref; + newLockFile = entryToLockFile(i->second); + } + deps.flakeDeps.push_back(resolveFlakeFromLockFile(state, ref, registryAccess, newLockFile)); } return deps; } +/* Given a flake reference, recursively fetch it and its dependencies. + FIXME: this should return a graph of flakes. 
+*/ +ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, RegistryAccess registryAccess, + bool recreateLockFile) +{ + bool allowRegistries = registryAccess == AllowRegistry || registryAccess == AllowRegistryAtTop; + Flake flake = getFlake(state, topRef, allowRegistries); + LockFile lockFile; + + if (!recreateLockFile) // If recreateLockFile, start with an empty lockfile + lockFile = readLockFile(flake.storePath + "/flake.lock"); // FIXME: symlink attack + + return resolveFlakeFromLockFile(state, topRef, registryAccess, lockFile, true); +} + LockFile::FlakeEntry dependenciesToFlakeEntry(const ResolvedFlake & resolvedFlake) { LockFile::FlakeEntry entry(resolvedFlake.flake.resolvedRef, resolvedFlake.flake.hash); @@ -410,31 +432,25 @@ LockFile::FlakeEntry dependenciesToFlakeEntry(const ResolvedFlake & resolvedFlak return entry; } -static LockFile makeLockFile(EvalState & evalState, FlakeRef & flakeRef) +static LockFile makeLockFile(EvalState & evalState, FlakeRef & flakeRef, bool recreateLockFile) { - ResolvedFlake resFlake = resolveFlake(evalState, flakeRef, AllowRegistry); - LockFile::FlakeEntry entry = dependenciesToFlakeEntry(resFlake); - LockFile lockFile; - lockFile.flakeEntries = entry.flakeEntries; - lockFile.nonFlakeEntries = entry.nonFlakeEntries; - return lockFile; + ResolvedFlake resFlake = resolveFlake(evalState, flakeRef, AllowRegistry, recreateLockFile); + return entryToLockFile(dependenciesToFlakeEntry(resFlake)); } -void updateLockFile(EvalState & state, const FlakeUri & flakeUri) +void updateLockFile(EvalState & state, const FlakeUri & uri, bool recreateLockFile) { - // FIXME: We are writing the lockfile to the store here! Very bad practice! - FlakeRef flakeRef = FlakeRef(flakeUri); + FlakeRef flakeRef = FlakeRef(uri); + auto lockFile = makeLockFile(state, flakeRef, recreateLockFile); if (auto refData = std::get_if(&flakeRef.data)) { - auto lockFile = makeLockFile(state, flakeRef); - writeLockFile(lockFile, refData->path + "/" + flakeRef.subdir + "/flake.lock"); + writeLockFile(lockFile, refData->path + (flakeRef.subdir == "" ? "" : "/" + flakeRef.subdir) + "/flake.lock"); // Hack: Make sure that flake.lock is visible to Git. Otherwise, // exportGit will fail to copy it to the Nix store. - runProgram("git", true, - { "-C", refData->path, "add", "--intent-to-add", - (flakeRef.subdir == "" ? "" : flakeRef.subdir + "/") + "flake.lock" }); + runProgram("git", true, { "-C", refData->path, "add", + (flakeRef.subdir == "" ? "" : flakeRef.subdir + "/") + "flake.lock" }); } else - throw Error("flakeUri %s can't be updated because it is not a path", flakeUri); + throw Error("flakeUri %s can't be updated because it is not a path", uri); } void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) @@ -485,16 +501,17 @@ void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) // Return the `provides` of the top flake, while assigning to `v` the provides // of the dependencies as well. -void makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, RegistryAccess registryAccess, Value & v) +void makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, RegistryAccess registryAccess, Value & v, bool recreateLockFile) { - callFlake(state, resolveFlake(state, flakeRef, registryAccess), v); + callFlake(state, resolveFlake(state, flakeRef, registryAccess, recreateLockFile), v); } // This function is exposed to be used in nix files. 
static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) { makeFlakeValue(state, state.forceStringNoCtx(*args[0], pos), - evalSettings.pureEval ? DisallowRegistry : AllowRegistryAtTop, v); + evalSettings.pureEval ? DisallowRegistry : AllowRegistryAtTop, v, false); + // `recreateLockFile == false` because this is the evaluation stage, which should be pure, and hence not recreate lockfiles. } static RegisterPrimOp r2("getFlake", 1, prim_getFlake); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 691f68b0a..e3481e99e 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -45,7 +45,7 @@ Path getUserRegistryPath(); enum RegistryAccess { DisallowRegistry, AllowRegistry, AllowRegistryAtTop }; -void makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, RegistryAccess registryAccess, Value & v); +void makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, RegistryAccess registryAccess, Value & v, bool recreateLockFile); std::shared_ptr readRegistry(const Path &); @@ -103,9 +103,9 @@ struct ResolvedFlake ResolvedFlake(const Flake & flake) : flake(flake) {} }; -ResolvedFlake resolveFlake(EvalState &, const FlakeRef &, RegistryAccess registryAccess, bool isTopFlake = true); +ResolvedFlake resolveFlake(EvalState &, const FlakeRef &, RegistryAccess, bool recreateLockFile); -void updateLockFile(EvalState &, const FlakeUri &); +void updateLockFile(EvalState &, const FlakeUri &, bool recreateLockFile); void gitCloneFlake (std::string flakeUri, EvalState &, Registries, Path); } diff --git a/src/nix/command.hh b/src/nix/command.hh index 640c6cd16..32a5047a8 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -76,10 +76,10 @@ struct SourceExprCommand : virtual Args, StoreCommand, MixEvalArgs { std::optional file; - bool updateLockFile = true; - SourceExprCommand(); + bool recreateLockFile = false; + ref getEvalState(); std::vector> parseInstallables( diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 0af368570..d2cdf6fc9 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -114,7 +114,8 @@ struct CmdFlakeDeps : FlakeCommand, MixJSON, StoreCommand, MixEvalArgs FlakeRef flakeRef(flakeUri); - ResolvedFlake resFlake = resolveFlake(*evalState, flakeRef, AllowRegistryAtTop); + bool recreateLockFile = false; + ResolvedFlake resFlake = resolveFlake(*evalState, flakeRef, AllowRegistryAtTop, recreateLockFile); std::queue todo; todo.push(resFlake); @@ -132,7 +133,7 @@ struct CmdFlakeDeps : FlakeCommand, MixJSON, StoreCommand, MixEvalArgs } }; -struct CmdFlakeUpdate : StoreCommand, GitRepoCommand, MixEvalArgs +struct CmdFlakeUpdate : StoreCommand, FlakeCommand, MixEvalArgs { std::string name() override { @@ -148,8 +149,8 @@ struct CmdFlakeUpdate : StoreCommand, GitRepoCommand, MixEvalArgs { auto evalState = std::make_shared(searchPath, store); - if (gitPath == "") gitPath = absPath("."); - updateLockFile(*evalState, gitPath); + bool recreateLockFile = true; + updateLockFile(*evalState, flakeUri, recreateLockFile); } }; diff --git a/src/nix/installables.cc b/src/nix/installables.cc index db67952e1..6d784002a 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -23,9 +23,9 @@ SourceExprCommand::SourceExprCommand() .dest(&file); mkFlag() - .longName("no-update") - .description("don't create/update flake lock files") - .set(&updateLockFile, false); + .longName("recreate-lock-file") + .description("recreate lock file from scratch") + .set(&recreateLockFile, true); } ref 
SourceExprCommand::getEvalState() @@ -157,13 +157,11 @@ struct InstallableFlake : InstallableValue Value * toValue(EvalState & state) override { - auto path = std::get_if(&flakeRef.data); - if (cmd.updateLockFile && path) { - updateLockFile(state, path->path); - } - auto vFlake = state.allocValue(); - makeFlakeValue(state, flakeRef, AllowRegistryAtTop, *vFlake); + if (std::get_if(&flakeRef.data)) + updateLockFile(state, flakeRef.to_string(), cmd.recreateLockFile); + + makeFlakeValue(state, flakeRef, AllowRegistryAtTop, *vFlake, cmd.recreateLockFile); auto vProvides = (*vFlake->attrs->get(state.symbols.create("provides")))->value; diff --git a/tests/flakes.sh b/tests/flakes.sh index 40ba42715..8b68aea65 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -9,11 +9,11 @@ clearStore registry=$TEST_ROOT/registry.json -flake1=$TEST_ROOT/flake1 -flake2=$TEST_ROOT/flake2 -flake3=$TEST_ROOT/flake3 +flake1Dir=$TEST_ROOT/flake1 +flake2Dir=$TEST_ROOT/flake2 +flake3Dir=$TEST_ROOT/flake3 -for repo in $flake1 $flake2 $flake3; do +for repo in $flake1Dir $flake2Dir $flake3Dir; do rm -rf $repo mkdir $repo git -C $repo init @@ -21,7 +21,7 @@ for repo in $flake1 $flake2 $flake3; do git -C $repo config user.name "Foobar" done -cat > $flake1/flake.nix < $flake1Dir/flake.nix < $flake1/flake.nix < $flake2/flake.nix < $flake2Dir/flake.nix < $flake2/flake.nix < $flake3/flake.nix < $registry < Date: Wed, 1 May 2019 11:38:48 +0200 Subject: [PATCH 132/634] Give errors in resolveFlake If DontUpdate but the lockfile isn't correct --- src/libexpr/primops/flake.cc | 70 ++++++++++++++++++++---------------- src/libexpr/primops/flake.hh | 8 ++--- src/nix/flake.cc | 3 +- src/nix/installables.cc | 2 +- tests/config.nix | 20 +++++++++++ tests/flakes.sh | 8 ++--- 6 files changed, 70 insertions(+), 41 deletions(-) create mode 100644 tests/config.nix diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 88eadff55..c576a8b3e 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -50,8 +50,7 @@ LockFile::FlakeEntry readFlakeEntry(nlohmann::json json) if (!flakeRef.isImmutable()) throw Error("cannot use mutable flake '%s' in pure mode", flakeRef); - Hash hash = Hash((std::string) json["contentHash"]); - LockFile::FlakeEntry entry(flakeRef, hash); + LockFile::FlakeEntry entry(flakeRef, Hash((std::string) json["contentHash"])); auto nonFlakeRequires = json["nonFlakeRequires"]; @@ -59,9 +58,8 @@ LockFile::FlakeEntry readFlakeEntry(nlohmann::json json) FlakeRef flakeRef(i->value("uri", "")); if (!flakeRef.isImmutable()) throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef); - Hash hash = Hash((std::string) i->value("contentHash", "")); - LockFile::NonFlakeEntry newEntry(flakeRef, hash); - entry.nonFlakeEntries.insert_or_assign(i.key(), newEntry); + LockFile::NonFlakeEntry nonEntry(flakeRef, Hash(i->value("contentHash", ""))); + entry.nonFlakeEntries.insert_or_assign(i.key(), nonEntry); } auto requires = json["requires"]; @@ -89,10 +87,10 @@ LockFile readLockFile(const Path & path) for (auto i = nonFlakeRequires.begin(); i != nonFlakeRequires.end(); ++i) { FlakeRef flakeRef(i->value("uri", "")); - LockFile::NonFlakeEntry entry(flakeRef, Hash((std::string) json["contentHash"])); + LockFile::NonFlakeEntry nonEntry(flakeRef, Hash(i->value("contentHash", ""))); if (!flakeRef.isImmutable()) - throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef); - lockFile.nonFlakeEntries.insert_or_assign(i.key(), entry); + throw Error("found mutable 
FlakeRef '%s' in lockfile at path %s", flakeRef, path); + lockFile.nonFlakeEntries.insert_or_assign(i.key(), nonEntry); } auto requires = json["requires"]; @@ -374,19 +372,25 @@ LockFile entryToLockFile(const LockFile::FlakeEntry & entry) return lockFile; } -ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flakeRef, RegistryAccess registryAccess, - LockFile lockFile, bool isTopFlake = false) +ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flakeRef, + ShouldUpdateLockFile update, LockFile lockFile = {}) { - bool allowRegistries = registryAccess == AllowRegistry || (registryAccess == AllowRegistryAtTop && isTopFlake); - Flake flake = getFlake(state, flakeRef, allowRegistries); + Flake flake = getFlake(state, flakeRef, update != DontUpdate); ResolvedFlake deps(flake); for (auto & nonFlakeInfo : flake.nonFlakeRequires) { FlakeRef ref = nonFlakeInfo.second; auto i = lockFile.nonFlakeEntries.find(nonFlakeInfo.first); - if (i != lockFile.nonFlakeEntries.end()) ref = i->second.ref; - deps.nonFlakeDeps.push_back(getNonFlake(state, ref, nonFlakeInfo.first)); + if (i != lockFile.nonFlakeEntries.end()) { + NonFlake nonFlake = getNonFlake(state, i->second.ref, nonFlakeInfo.first); + if (nonFlake.hash != i->second.contentHash) + throw Error("the content hash of flakeref %s doesn't match", i->second.ref.to_string()); + deps.nonFlakeDeps.push_back(nonFlake); + } else { + if (update == DontUpdate) throw Error("the lockfile requires updating nonflake dependency %s in DontUpdate mode", nonFlakeInfo.first); + deps.nonFlakeDeps.push_back(getNonFlake(state, nonFlakeInfo.second, nonFlakeInfo.first)); + } } for (auto newFlakeRef : flake.requires) { @@ -394,10 +398,14 @@ ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flake LockFile newLockFile; auto i = lockFile.flakeEntries.find(newFlakeRef); if (i != lockFile.flakeEntries.end()) { // Propagate lockFile downwards if possible - ref = i->second.ref; - newLockFile = entryToLockFile(i->second); + ResolvedFlake newResFlake = resolveFlakeFromLockFile(state, i->second.ref, update, entryToLockFile(i->second)); + if (newResFlake.flake.hash != i->second.contentHash) + throw Error("the content hash of flakeref %s doesn't match", i->second.ref.to_string()); + deps.flakeDeps.push_back(newResFlake); + } else { + if (update == DontUpdate) throw Error("the lockfile requires updating flake dependency %s in DontUpdate mode", newFlakeRef.to_string()); + deps.flakeDeps.push_back(resolveFlakeFromLockFile(state, newFlakeRef, update)); } - deps.flakeDeps.push_back(resolveFlakeFromLockFile(state, ref, registryAccess, newLockFile)); } return deps; @@ -406,17 +414,18 @@ ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flake /* Given a flake reference, recursively fetch it and its dependencies. FIXME: this should return a graph of flakes. 
*/ -ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, RegistryAccess registryAccess, - bool recreateLockFile) +ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, ShouldUpdateLockFile update) { - bool allowRegistries = registryAccess == AllowRegistry || registryAccess == AllowRegistryAtTop; - Flake flake = getFlake(state, topRef, allowRegistries); + if (!std::get_if(&topRef.data)) update = DontUpdate; + Flake flake = getFlake(state, topRef, update != DontUpdate); LockFile lockFile; - if (!recreateLockFile) // If recreateLockFile, start with an empty lockfile + if (update != RecreateLockFile) { + // If recreateLockFile, start with an empty lockfile lockFile = readLockFile(flake.storePath + "/flake.lock"); // FIXME: symlink attack + } - return resolveFlakeFromLockFile(state, topRef, registryAccess, lockFile, true); + return resolveFlakeFromLockFile(state, topRef, update, lockFile); } LockFile::FlakeEntry dependenciesToFlakeEntry(const ResolvedFlake & resolvedFlake) @@ -426,15 +435,17 @@ LockFile::FlakeEntry dependenciesToFlakeEntry(const ResolvedFlake & resolvedFlak for (auto & newResFlake : resolvedFlake.flakeDeps) entry.flakeEntries.insert_or_assign(newResFlake.flake.originalRef, dependenciesToFlakeEntry(newResFlake)); - for (auto & nonFlake : resolvedFlake.nonFlakeDeps) - entry.nonFlakeEntries.insert_or_assign(nonFlake.alias, LockFile::NonFlakeEntry(nonFlake.resolvedRef, nonFlake.hash)); + for (auto & nonFlake : resolvedFlake.nonFlakeDeps) { + LockFile::NonFlakeEntry nonEntry(nonFlake.resolvedRef, nonFlake.hash); + entry.nonFlakeEntries.insert_or_assign(nonFlake.alias, nonEntry); + } return entry; } static LockFile makeLockFile(EvalState & evalState, FlakeRef & flakeRef, bool recreateLockFile) { - ResolvedFlake resFlake = resolveFlake(evalState, flakeRef, AllowRegistry, recreateLockFile); + ResolvedFlake resFlake = resolveFlake(evalState, flakeRef, recreateLockFile ? RecreateLockFile : UpdateLockFile); return entryToLockFile(dependenciesToFlakeEntry(resFlake)); } @@ -501,17 +512,16 @@ void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) // Return the `provides` of the top flake, while assigning to `v` the provides // of the dependencies as well. -void makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, RegistryAccess registryAccess, Value & v, bool recreateLockFile) +void makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, ShouldUpdateLockFile update, Value & v) { - callFlake(state, resolveFlake(state, flakeRef, registryAccess, recreateLockFile), v); + callFlake(state, resolveFlake(state, flakeRef, update), v); } // This function is exposed to be used in nix files. static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) { makeFlakeValue(state, state.forceStringNoCtx(*args[0], pos), - evalSettings.pureEval ? DisallowRegistry : AllowRegistryAtTop, v, false); - // `recreateLockFile == false` because this is the evaluation stage, which should be pure, and hence not recreate lockfiles. + evalSettings.pureEval ? 
DontUpdate : UpdateLockFile, v); } static RegisterPrimOp r2("getFlake", 1, prim_getFlake); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index e3481e99e..132439b93 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -43,9 +43,9 @@ typedef std::vector> Registries; Path getUserRegistryPath(); -enum RegistryAccess { DisallowRegistry, AllowRegistry, AllowRegistryAtTop }; +enum ShouldUpdateLockFile { DontUpdate, UpdateLockFile, RecreateLockFile}; -void makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, RegistryAccess registryAccess, Value & v, bool recreateLockFile); +void makeFlakeValue(EvalState &, const FlakeRef &, ShouldUpdateLockFile, Value &); std::shared_ptr readRegistry(const Path &); @@ -84,8 +84,8 @@ struct NonFlake FlakeRef originalRef; FlakeRef resolvedRef; std::optional revCount; + Hash hash; Path storePath; - Hash hash; // content hash // date NonFlake(const FlakeRef & origRef, const SourceInfo & sourceInfo) : originalRef(origRef), resolvedRef(sourceInfo.resolvedRef), revCount(sourceInfo.revCount), storePath(sourceInfo.storePath) {}; @@ -103,7 +103,7 @@ struct ResolvedFlake ResolvedFlake(const Flake & flake) : flake(flake) {} }; -ResolvedFlake resolveFlake(EvalState &, const FlakeRef &, RegistryAccess, bool recreateLockFile); +ResolvedFlake resolveFlake(EvalState &, const FlakeRef &, ShouldUpdateLockFile); void updateLockFile(EvalState &, const FlakeUri &, bool recreateLockFile); diff --git a/src/nix/flake.cc b/src/nix/flake.cc index d2cdf6fc9..fc0fc76b4 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -114,8 +114,7 @@ struct CmdFlakeDeps : FlakeCommand, MixJSON, StoreCommand, MixEvalArgs FlakeRef flakeRef(flakeUri); - bool recreateLockFile = false; - ResolvedFlake resFlake = resolveFlake(*evalState, flakeRef, AllowRegistryAtTop, recreateLockFile); + ResolvedFlake resFlake = resolveFlake(*evalState, flakeRef, UpdateLockFile); std::queue todo; todo.push(resFlake); diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 6d784002a..25f3f4f9d 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -161,7 +161,7 @@ struct InstallableFlake : InstallableValue if (std::get_if(&flakeRef.data)) updateLockFile(state, flakeRef.to_string(), cmd.recreateLockFile); - makeFlakeValue(state, flakeRef, AllowRegistryAtTop, *vFlake, cmd.recreateLockFile); + makeFlakeValue(state, flakeRef, cmd.recreateLockFile ? 
RecreateLockFile : UpdateLockFile, *vFlake); auto vProvides = (*vFlake->attrs->get(state.symbols.create("provides")))->value; diff --git a/tests/config.nix b/tests/config.nix new file mode 100644 index 000000000..03810d57a --- /dev/null +++ b/tests/config.nix @@ -0,0 +1,20 @@ +with import ; + +rec { + inherit shell; + + path = coreutils; + + system = "x86_64-linux"; + + shared = builtins.getEnv "_NIX_TEST_SHARED"; + + mkDerivation = args: + derivation ({ + inherit system; + builder = shell; + args = ["-e" args.builder or (builtins.toFile "builder.sh" "if [ -e .attrs.sh ]; then source .attrs.sh; fi; eval \"$buildCommand\"")]; + PATH = path; + } // removeAttrs args ["builder" "meta"]) + // { meta = args.meta or {}; }; +} diff --git a/tests/flakes.sh b/tests/flakes.sh index 8b68aea65..d720eaf23 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -59,7 +59,7 @@ EOF git -C $flake2Dir add flake.nix git -C $flake2Dir commit -m 'Initial' -cat > $flake3/flake.nix < $flake3Dir/flake.nix < $flake3/flake.nix < $registry < Date: Tue, 14 May 2019 11:34:45 +0200 Subject: [PATCH 133/634] Lockfile handling in `resolveFlake` is fixed --- src/libexpr/primops/flake.cc | 136 +++++++++++++++++++++-------------- src/libexpr/primops/flake.hh | 17 +++-- src/nix/command.hh | 4 ++ src/nix/flake.cc | 4 +- src/nix/installables.cc | 18 ++++- tests/config.nix | 20 ------ tests/flakes.sh | 35 ++++++++- 7 files changed, 149 insertions(+), 85 deletions(-) delete mode 100644 tests/config.nix diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index c576a8b3e..3cbb0c1ef 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -372,10 +372,58 @@ LockFile entryToLockFile(const LockFile::FlakeEntry & entry) return lockFile; } -ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flakeRef, - ShouldUpdateLockFile update, LockFile lockFile = {}) +LockFile::FlakeEntry dependenciesToFlakeEntry(const ResolvedFlake & resolvedFlake) { - Flake flake = getFlake(state, flakeRef, update != DontUpdate); + LockFile::FlakeEntry entry(resolvedFlake.flake.resolvedRef, resolvedFlake.flake.hash); + + for (auto & info : resolvedFlake.flakeDeps) + entry.flakeEntries.insert_or_assign(info.first.to_string(), dependenciesToFlakeEntry(info.second)); + + for (auto & nonFlake : resolvedFlake.nonFlakeDeps) { + LockFile::NonFlakeEntry nonEntry(nonFlake.resolvedRef, nonFlake.hash); + entry.nonFlakeEntries.insert_or_assign(nonFlake.alias, nonEntry); + } + + return entry; +} + +bool allowedToWrite (HandleLockFile handle) +{ + if (handle == AllPure) return false; + else if (handle == TopRefUsesRegistries) return false; + else if (handle == UpdateLockFile) return true; + else if (handle == UseUpdatedLockFile) return false; + else if (handle == RecreateLockFile) return true; + else if (handle == UseNewLockFile) return false; + else assert(false); +} + +bool recreateLockFile (HandleLockFile handle) +{ + if (handle == AllPure) return false; + else if (handle == TopRefUsesRegistries) return false; + else if (handle == UpdateLockFile) return false; + else if (handle == UseUpdatedLockFile) return false; + else if (handle == RecreateLockFile) return true; + else if (handle == UseNewLockFile) return true; + else assert(false); +} + +bool allowedToUseRegistries (HandleLockFile handle, bool isTopRef) +{ + if (handle == AllPure) return false; + else if (handle == TopRefUsesRegistries) return isTopRef; + else if (handle == UpdateLockFile) return true; + else if (handle == UseUpdatedLockFile) return true; + 
else if (handle == RecreateLockFile) return true; + else if (handle == UseNewLockFile) return true; + else assert(false); +} + +ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flakeRef, + HandleLockFile handleLockFile, LockFile lockFile = {}, bool topRef = false) +{ + Flake flake = getFlake(state, flakeRef, allowedToUseRegistries(handleLockFile, topRef)); ResolvedFlake deps(flake); @@ -388,23 +436,23 @@ ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flake throw Error("the content hash of flakeref %s doesn't match", i->second.ref.to_string()); deps.nonFlakeDeps.push_back(nonFlake); } else { - if (update == DontUpdate) throw Error("the lockfile requires updating nonflake dependency %s in DontUpdate mode", nonFlakeInfo.first); + if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries) + throw Error("the lockfile requires updating nonflake dependency %s in AllPure mode", nonFlakeInfo.first); deps.nonFlakeDeps.push_back(getNonFlake(state, nonFlakeInfo.second, nonFlakeInfo.first)); } } for (auto newFlakeRef : flake.requires) { - FlakeRef ref = newFlakeRef; - LockFile newLockFile; auto i = lockFile.flakeEntries.find(newFlakeRef); if (i != lockFile.flakeEntries.end()) { // Propagate lockFile downwards if possible - ResolvedFlake newResFlake = resolveFlakeFromLockFile(state, i->second.ref, update, entryToLockFile(i->second)); + ResolvedFlake newResFlake = resolveFlakeFromLockFile(state, i->second.ref, handleLockFile, entryToLockFile(i->second)); if (newResFlake.flake.hash != i->second.contentHash) throw Error("the content hash of flakeref %s doesn't match", i->second.ref.to_string()); - deps.flakeDeps.push_back(newResFlake); + deps.flakeDeps.insert_or_assign(newFlakeRef, newResFlake); } else { - if (update == DontUpdate) throw Error("the lockfile requires updating flake dependency %s in DontUpdate mode", newFlakeRef.to_string()); - deps.flakeDeps.push_back(resolveFlakeFromLockFile(state, newFlakeRef, update)); + if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries) + throw Error("the lockfile requires updating flake dependency %s in AllPure mode", newFlakeRef.to_string()); + deps.flakeDeps.insert_or_assign(newFlakeRef, resolveFlakeFromLockFile(state, newFlakeRef, handleLockFile)); } } @@ -414,54 +462,37 @@ ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flake /* Given a flake reference, recursively fetch it and its dependencies. FIXME: this should return a graph of flakes. 
*/ -ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, ShouldUpdateLockFile update) +ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLockFile handleLockFile) { - if (!std::get_if(&topRef.data)) update = DontUpdate; - Flake flake = getFlake(state, topRef, update != DontUpdate); + Flake flake = getFlake(state, topRef, allowedToUseRegistries(handleLockFile, true)); LockFile lockFile; - if (update != RecreateLockFile) { + if (!recreateLockFile (handleLockFile)) { // If recreateLockFile, start with an empty lockfile lockFile = readLockFile(flake.storePath + "/flake.lock"); // FIXME: symlink attack } - return resolveFlakeFromLockFile(state, topRef, update, lockFile); + ResolvedFlake resFlake = resolveFlakeFromLockFile(state, topRef, handleLockFile, lockFile, true); + lockFile = entryToLockFile(dependenciesToFlakeEntry(resFlake)); + + if (allowedToWrite(handleLockFile)) { + if (auto refData = std::get_if(&topRef.data)) { + writeLockFile(lockFile, refData->path + (topRef.subdir == "" ? "" : "/" + topRef.subdir) + "/flake.lock"); + + // Hack: Make sure that flake.lock is visible to Git, so it ends up in the Nix store. + runProgram("git", true, { "-C", refData->path, "add", + (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock" }); + } else std::cout << "Cannot write lockfile because the FlakeRef isn't of the form IsPath." << std::endl; + } else if (handleLockFile != AllPure && handleLockFile != TopRefUsesRegistries) + std::cout << "Using updating lockfile without writing it to file" << std::endl; + + return resFlake; } -LockFile::FlakeEntry dependenciesToFlakeEntry(const ResolvedFlake & resolvedFlake) +void updateLockFile (EvalState & state, const FlakeUri & flakeUri, bool recreateLockFile) { - LockFile::FlakeEntry entry(resolvedFlake.flake.resolvedRef, resolvedFlake.flake.hash); - - for (auto & newResFlake : resolvedFlake.flakeDeps) - entry.flakeEntries.insert_or_assign(newResFlake.flake.originalRef, dependenciesToFlakeEntry(newResFlake)); - - for (auto & nonFlake : resolvedFlake.nonFlakeDeps) { - LockFile::NonFlakeEntry nonEntry(nonFlake.resolvedRef, nonFlake.hash); - entry.nonFlakeEntries.insert_or_assign(nonFlake.alias, nonEntry); - } - - return entry; -} - -static LockFile makeLockFile(EvalState & evalState, FlakeRef & flakeRef, bool recreateLockFile) -{ - ResolvedFlake resFlake = resolveFlake(evalState, flakeRef, recreateLockFile ? RecreateLockFile : UpdateLockFile); - return entryToLockFile(dependenciesToFlakeEntry(resFlake)); -} - -void updateLockFile(EvalState & state, const FlakeUri & uri, bool recreateLockFile) -{ - FlakeRef flakeRef = FlakeRef(uri); - auto lockFile = makeLockFile(state, flakeRef, recreateLockFile); - if (auto refData = std::get_if(&flakeRef.data)) { - writeLockFile(lockFile, refData->path + (flakeRef.subdir == "" ? "" : "/" + flakeRef.subdir) + "/flake.lock"); - - // Hack: Make sure that flake.lock is visible to Git. Otherwise, - // exportGit will fail to copy it to the Nix store. - runProgram("git", true, { "-C", refData->path, "add", - (flakeRef.subdir == "" ? "" : flakeRef.subdir + "/") + "flake.lock" }); - } else - throw Error("flakeUri %s can't be updated because it is not a path", uri); + FlakeRef flakeRef(flakeUri); + resolveFlake(state, flakeRef, recreateLockFile ? 
RecreateLockFile : UpdateLockFile); } void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) @@ -471,7 +502,8 @@ void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) state.mkAttrs(v, resFlake.flakeDeps.size() + resFlake.nonFlakeDeps.size() + 8); - for (const ResolvedFlake newResFlake : resFlake.flakeDeps) { + for (auto info : resFlake.flakeDeps) { + const ResolvedFlake newResFlake = info.second; auto vFlake = state.allocAttr(v, newResFlake.flake.id); callFlake(state, newResFlake, *vFlake); } @@ -512,16 +544,16 @@ void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) // Return the `provides` of the top flake, while assigning to `v` the provides // of the dependencies as well. -void makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, ShouldUpdateLockFile update, Value & v) +void makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, HandleLockFile handle, Value & v) { - callFlake(state, resolveFlake(state, flakeRef, update), v); + callFlake(state, resolveFlake(state, flakeRef, handle), v); } // This function is exposed to be used in nix files. static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) { makeFlakeValue(state, state.forceStringNoCtx(*args[0], pos), - evalSettings.pureEval ? DontUpdate : UpdateLockFile, v); + evalSettings.pureEval ? AllPure : UseUpdatedLockFile, v); } static RegisterPrimOp r2("getFlake", 1, prim_getFlake); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 132439b93..6f91686a6 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -36,16 +36,23 @@ struct LockFile }; std::map flakeEntries; - std::map nonFlakeEntries; + std::map nonFlakeEntries; }; typedef std::vector> Registries; Path getUserRegistryPath(); -enum ShouldUpdateLockFile { DontUpdate, UpdateLockFile, RecreateLockFile}; +enum HandleLockFile + { AllPure // Everything is handled 100% purely + , TopRefUsesRegistries // The top FlakeRef uses the registries, apart from that, everything happens 100% purely + , UpdateLockFile // Update the existing lockfile and write it to file + , UseUpdatedLockFile // `UpdateLockFile` without writing to file + , RecreateLockFile // Recreate the lockfile from scratch and write it to file + , UseNewLockFile // `RecreateLockFile` without writing to file + }; -void makeFlakeValue(EvalState &, const FlakeRef &, ShouldUpdateLockFile, Value &); +void makeFlakeValue(EvalState &, const FlakeRef &, HandleLockFile, Value &); std::shared_ptr readRegistry(const Path &); @@ -98,12 +105,12 @@ Flake getFlake(EvalState &, const FlakeRef &, bool impureIsAllowed); struct ResolvedFlake { Flake flake; - std::vector flakeDeps; // The flake dependencies + std::map flakeDeps; // The key in this map, is the originalRef as written in flake.nix std::vector nonFlakeDeps; ResolvedFlake(const Flake & flake) : flake(flake) {} }; -ResolvedFlake resolveFlake(EvalState &, const FlakeRef &, ShouldUpdateLockFile); +ResolvedFlake resolveFlake(EvalState &, const FlakeRef &, HandleLockFile); void updateLockFile(EvalState &, const FlakeUri &, bool recreateLockFile); diff --git a/src/nix/command.hh b/src/nix/command.hh index 32a5047a8..30d869b19 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -80,6 +80,10 @@ struct SourceExprCommand : virtual Args, StoreCommand, MixEvalArgs bool recreateLockFile = false; + bool saveLockFile = true; + + bool noRegistries = false; + ref getEvalState(); std::vector> parseInstallables( diff --git 
a/src/nix/flake.cc b/src/nix/flake.cc index fc0fc76b4..bfb3178ad 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -126,8 +126,8 @@ struct CmdFlakeDeps : FlakeCommand, MixJSON, StoreCommand, MixEvalArgs for (NonFlake & nonFlake : resFlake.nonFlakeDeps) printNonFlakeInfo(nonFlake, json); - for (ResolvedFlake & newResFlake : resFlake.flakeDeps) - todo.push(newResFlake); + for (auto info : resFlake.flakeDeps) + todo.push(info.second); } } }; diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 25f3f4f9d..a2a55d949 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -26,6 +26,16 @@ SourceExprCommand::SourceExprCommand() .longName("recreate-lock-file") .description("recreate lock file from scratch") .set(&recreateLockFile, true); + + mkFlag() + .longName("dont-save-lock-file") + .description("save the newly generated lock file") + .set(&saveLockFile, false); + + mkFlag() + .longName("no-registries") + .description("don't use flake registries") + .set(&noRegistries, true); } ref SourceExprCommand::getEvalState() @@ -158,10 +168,12 @@ struct InstallableFlake : InstallableValue Value * toValue(EvalState & state) override { auto vFlake = state.allocValue(); - if (std::get_if(&flakeRef.data)) - updateLockFile(state, flakeRef.to_string(), cmd.recreateLockFile); - makeFlakeValue(state, flakeRef, cmd.recreateLockFile ? RecreateLockFile : UpdateLockFile, *vFlake); + HandleLockFile handle = cmd.noRegistries ? AllPure : + cmd.recreateLockFile ? + (cmd.saveLockFile ? RecreateLockFile : UseNewLockFile) + : (cmd.saveLockFile ? UpdateLockFile : UseUpdatedLockFile); + makeFlakeValue(state, flakeRef, handle, *vFlake); auto vProvides = (*vFlake->attrs->get(state.symbols.create("provides")))->value; diff --git a/tests/config.nix b/tests/config.nix deleted file mode 100644 index 03810d57a..000000000 --- a/tests/config.nix +++ /dev/null @@ -1,20 +0,0 @@ -with import ; - -rec { - inherit shell; - - path = coreutils; - - system = "x86_64-linux"; - - shared = builtins.getEnv "_NIX_TEST_SHARED"; - - mkDerivation = args: - derivation ({ - inherit system; - builder = shell; - args = ["-e" args.builder or (builtins.toFile "builder.sh" "if [ -e .attrs.sh ]; then source .attrs.sh; fi; eval \"$buildCommand\"")]; - PATH = path; - } // removeAttrs args ["builder" "meta"]) - // { meta = args.meta or {}; }; -} diff --git a/tests/flakes.sh b/tests/flakes.sh index d720eaf23..45c5f2048 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -118,7 +118,7 @@ nix build -o $TEST_ROOT/result --flake-registry $registry flake1: [[ -e $TEST_ROOT/result/hello ]] # Building a flake with an unlocked dependency should fail in pure mode. -(! nix build -o $TEST_ROOT/result --flake-registry $registry flake2:bar) +(! nix eval "(builtins.getFlake "$flake2Dir")") # But should succeed in impure mode. nix build -o $TEST_ROOT/result --flake-registry $registry flake2:bar --impure @@ -129,8 +129,8 @@ nix build -o $TEST_ROOT/result --flake-registry $registry $flake2Dir:bar git -C $flake2Dir commit flake.lock -m 'Add flake.lock' # Rerunning the build should not change the lockfile. -#nix build -o $TEST_ROOT/result --flake-registry $registry $flake2:bar -#[[ -z $(git -C $flake2 diff) ]] +nix build -o $TEST_ROOT/result --flake-registry $registry $flake2Dir:bar +[[ -z $(git -C $flake2Dir diff master) ]] # Now we should be able to build the flake in pure mode. 
nix build -o $TEST_ROOT/result --flake-registry $registry flake2:bar @@ -140,3 +140,32 @@ nix build -o $TEST_ROOT/result file://$flake2Dir:bar # Test whether indirect dependencies work. nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir:xyzzy + +# Add dependency to flake3 +rm $flake3Dir/flake.nix + +cat > $flake3Dir/flake.nix < Date: Tue, 21 May 2019 14:55:43 +0200 Subject: [PATCH 134/634] Only rewrite the lockfile if it changed This removes spurious warnings about failure to write the lockfile. --- src/libexpr/primops/flake.cc | 28 ++++++++++++++++------------ src/libexpr/primops/flake.hh | 21 +++++++++++++++++++++ 2 files changed, 37 insertions(+), 12 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 3cbb0c1ef..788977c72 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -465,31 +465,35 @@ ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flake ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLockFile handleLockFile) { Flake flake = getFlake(state, topRef, allowedToUseRegistries(handleLockFile, true)); - LockFile lockFile; + LockFile oldLockFile; if (!recreateLockFile (handleLockFile)) { // If recreateLockFile, start with an empty lockfile - lockFile = readLockFile(flake.storePath + "/flake.lock"); // FIXME: symlink attack + oldLockFile = readLockFile(flake.storePath + "/flake.lock"); // FIXME: symlink attack } + LockFile lockFile(oldLockFile); + ResolvedFlake resFlake = resolveFlakeFromLockFile(state, topRef, handleLockFile, lockFile, true); lockFile = entryToLockFile(dependenciesToFlakeEntry(resFlake)); - if (allowedToWrite(handleLockFile)) { - if (auto refData = std::get_if(&topRef.data)) { - writeLockFile(lockFile, refData->path + (topRef.subdir == "" ? "" : "/" + topRef.subdir) + "/flake.lock"); + if (!(lockFile == oldLockFile)) { + if (allowedToWrite(handleLockFile)) { + if (auto refData = std::get_if(&topRef.data)) { + writeLockFile(lockFile, refData->path + (topRef.subdir == "" ? "" : "/" + topRef.subdir) + "/flake.lock"); - // Hack: Make sure that flake.lock is visible to Git, so it ends up in the Nix store. - runProgram("git", true, { "-C", refData->path, "add", - (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock" }); - } else std::cout << "Cannot write lockfile because the FlakeRef isn't of the form IsPath." << std::endl; - } else if (handleLockFile != AllPure && handleLockFile != TopRefUsesRegistries) - std::cout << "Using updating lockfile without writing it to file" << std::endl; + // Hack: Make sure that flake.lock is visible to Git, so it ends up in the Nix store. + runProgram("git", true, { "-C", refData->path, "add", + (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock" }); + } else std::cout << "Cannot write lockfile because the FlakeRef isn't of the form IsPath." << std::endl; + } else if (handleLockFile != AllPure && handleLockFile != TopRefUsesRegistries) + std::cout << "Using updating lockfile without writing it to file" << std::endl; + } return resFlake; } -void updateLockFile (EvalState & state, const FlakeUri & flakeUri, bool recreateLockFile) +void updateLockFile(EvalState & state, const FlakeUri & flakeUri, bool recreateLockFile) { FlakeRef flakeRef(flakeUri); resolveFlake(state, flakeRef, recreateLockFile ? 
RecreateLockFile : UpdateLockFile); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 6f91686a6..e43b860ee 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -24,6 +24,11 @@ struct LockFile FlakeRef ref; Hash contentHash; NonFlakeEntry(const FlakeRef & flakeRef, const Hash & hash) : ref(flakeRef), contentHash(hash) {}; + + bool operator ==(const NonFlakeEntry & other) const + { + return ref == other.ref && contentHash == other.contentHash; + } }; struct FlakeEntry @@ -33,10 +38,26 @@ struct LockFile std::map flakeEntries; std::map nonFlakeEntries; FlakeEntry(const FlakeRef & flakeRef, const Hash & hash) : ref(flakeRef), contentHash(hash) {}; + + bool operator ==(const FlakeEntry & other) const + { + return + ref == other.ref + && contentHash == other.contentHash + && flakeEntries == other.flakeEntries + && nonFlakeEntries == other.nonFlakeEntries; + } }; std::map flakeEntries; std::map nonFlakeEntries; + + bool operator ==(const LockFile & other) const + { + return + flakeEntries == other.flakeEntries + && nonFlakeEntries == other.nonFlakeEntries; + } }; typedef std::vector> Registries; From 5990b86391a7b51879c865eed39ee46f39cbdd11 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 21 May 2019 15:03:54 +0200 Subject: [PATCH 135/634] Use warn(), tweak messages --- src/libexpr/primops/flake.cc | 35 ++++++++++++----------------------- 1 file changed, 12 insertions(+), 23 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 788977c72..6998536ec 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -387,29 +387,17 @@ LockFile::FlakeEntry dependenciesToFlakeEntry(const ResolvedFlake & resolvedFlak return entry; } -bool allowedToWrite (HandleLockFile handle) +bool allowedToWrite(HandleLockFile handle) { - if (handle == AllPure) return false; - else if (handle == TopRefUsesRegistries) return false; - else if (handle == UpdateLockFile) return true; - else if (handle == UseUpdatedLockFile) return false; - else if (handle == RecreateLockFile) return true; - else if (handle == UseNewLockFile) return false; - else assert(false); + return handle == UpdateLockFile || handle == RecreateLockFile; } -bool recreateLockFile (HandleLockFile handle) +bool recreateLockFile(HandleLockFile handle) { - if (handle == AllPure) return false; - else if (handle == TopRefUsesRegistries) return false; - else if (handle == UpdateLockFile) return false; - else if (handle == UseUpdatedLockFile) return false; - else if (handle == RecreateLockFile) return true; - else if (handle == UseNewLockFile) return true; - else assert(false); + return handle == RecreateLockFile || handle == UseNewLockFile; } -bool allowedToUseRegistries (HandleLockFile handle, bool isTopRef) +bool allowedToUseRegistries(HandleLockFile handle, bool isTopRef) { if (handle == AllPure) return false; else if (handle == TopRefUsesRegistries) return isTopRef; @@ -433,11 +421,11 @@ ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flake if (i != lockFile.nonFlakeEntries.end()) { NonFlake nonFlake = getNonFlake(state, i->second.ref, nonFlakeInfo.first); if (nonFlake.hash != i->second.contentHash) - throw Error("the content hash of flakeref %s doesn't match", i->second.ref.to_string()); + throw Error("the content hash of flakeref '%s' doesn't match", i->second.ref.to_string()); deps.nonFlakeDeps.push_back(nonFlake); } else { if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries) - throw 
Error("the lockfile requires updating nonflake dependency %s in AllPure mode", nonFlakeInfo.first); + throw Error("cannot update non-flake dependency '%s' in pure mode", nonFlakeInfo.first); deps.nonFlakeDeps.push_back(getNonFlake(state, nonFlakeInfo.second, nonFlakeInfo.first)); } } @@ -447,11 +435,11 @@ ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flake if (i != lockFile.flakeEntries.end()) { // Propagate lockFile downwards if possible ResolvedFlake newResFlake = resolveFlakeFromLockFile(state, i->second.ref, handleLockFile, entryToLockFile(i->second)); if (newResFlake.flake.hash != i->second.contentHash) - throw Error("the content hash of flakeref %s doesn't match", i->second.ref.to_string()); + throw Error("the content hash of flakeref '%s' doesn't match", i->second.ref.to_string()); deps.flakeDeps.insert_or_assign(newFlakeRef, newResFlake); } else { if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries) - throw Error("the lockfile requires updating flake dependency %s in AllPure mode", newFlakeRef.to_string()); + throw Error("cannot update flake dependency '%s' in pure mode", newFlakeRef.to_string()); deps.flakeDeps.insert_or_assign(newFlakeRef, resolveFlakeFromLockFile(state, newFlakeRef, handleLockFile)); } } @@ -485,9 +473,10 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLoc // Hack: Make sure that flake.lock is visible to Git, so it ends up in the Nix store. runProgram("git", true, { "-C", refData->path, "add", (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock" }); - } else std::cout << "Cannot write lockfile because the FlakeRef isn't of the form IsPath." << std::endl; + } else + warn("cannot write lockfile of remote flake '%s'", topRef); } else if (handleLockFile != AllPure && handleLockFile != TopRefUsesRegistries) - std::cout << "Using updating lockfile without writing it to file" << std::endl; + warn("using updated lockfile without writing it to file"); } return resFlake; From 2468672e305faf672c3901c1a9605ca1cb175908 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 16 May 2019 22:48:16 +0200 Subject: [PATCH 136/634] Improve FlakeCommand It now handles commonality like calling getFlake() and resolving relative local flake refs. Fixes #2822. --- src/libexpr/primops/flake.cc | 11 +++--- src/libexpr/primops/flake.hh | 5 +-- src/nix/command.hh | 37 ++++++------------- src/nix/flake.cc | 69 ++++++++++++++++++++++++------------ src/nix/installables.cc | 2 +- 5 files changed, 65 insertions(+), 59 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 6998536ec..c08c30c9c 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -482,9 +482,8 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLoc return resFlake; } -void updateLockFile(EvalState & state, const FlakeUri & flakeUri, bool recreateLockFile) +void updateLockFile(EvalState & state, const FlakeRef & flakeRef, bool recreateLockFile) { - FlakeRef flakeRef(flakeUri); resolveFlake(state, flakeRef, recreateLockFile ? 
RecreateLockFile : UpdateLockFile); } @@ -551,10 +550,8 @@ static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Va static RegisterPrimOp r2("getFlake", 1, prim_getFlake); -void gitCloneFlake (std::string flakeUri, EvalState & state, Registries registries, - Path endDirectory) +void gitCloneFlake(FlakeRef flakeRef, EvalState & state, Registries registries, const Path & destDir) { - FlakeRef flakeRef(flakeUri); flakeRef = lookupFlake(state, flakeRef, registries); std::string uri; @@ -576,8 +573,8 @@ void gitCloneFlake (std::string flakeUri, EvalState & state, Registries registri } } - if (endDirectory != "") - args.push_back(endDirectory); + if (destDir != "") + args.push_back(destDir); runProgram("git", true, args); } diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index e43b860ee..677cdb7b7 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -133,7 +133,8 @@ struct ResolvedFlake ResolvedFlake resolveFlake(EvalState &, const FlakeRef &, HandleLockFile); -void updateLockFile(EvalState &, const FlakeUri &, bool recreateLockFile); +void updateLockFile(EvalState &, const FlakeRef & flakeRef, bool recreateLockFile); + +void gitCloneFlake(FlakeRef flakeRef, EvalState &, Registries, const Path & destDir); -void gitCloneFlake (std::string flakeUri, EvalState &, Registries, Path); } diff --git a/src/nix/command.hh b/src/nix/command.hh index 30d869b19..423ac5baa 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -35,26 +35,6 @@ struct Buildable typedef std::vector Buildables; -struct GitRepoCommand : virtual Args -{ - std::string gitPath = absPath("."); - - GitRepoCommand () - { - expectArg("git-path", &gitPath, true); - } -}; - -struct FlakeCommand : virtual Args -{ - std::string flakeUri; - - FlakeCommand() - { - expectArg("flake-uri", &flakeUri); - } -}; - struct Installable { virtual std::string what() = 0; @@ -72,7 +52,16 @@ struct Installable } }; -struct SourceExprCommand : virtual Args, StoreCommand, MixEvalArgs +struct EvalCommand : virtual StoreCommand, MixEvalArgs +{ + ref getEvalState(); + +private: + + std::shared_ptr evalState; +}; + +struct SourceExprCommand : virtual Args, EvalCommand { std::optional file; @@ -84,8 +73,6 @@ struct SourceExprCommand : virtual Args, StoreCommand, MixEvalArgs bool noRegistries = false; - ref getEvalState(); - std::vector> parseInstallables( ref store, std::vector ss); @@ -96,10 +83,6 @@ struct SourceExprCommand : virtual Args, StoreCommand, MixEvalArgs { return {"defaultPackage"}; } - -private: - - std::shared_ptr evalState; }; enum RealiseMode { Build, NoBuild, DryRun }; diff --git a/src/nix/flake.cc b/src/nix/flake.cc index bfb3178ad..bc2f1cb5b 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -10,7 +10,33 @@ using namespace nix; -struct CmdFlakeList : StoreCommand, MixEvalArgs +class FlakeCommand : virtual Args, public EvalCommand +{ + std::string flakeUri = "."; + +public: + + FlakeCommand() + { + expectArg("flake-uri", &flakeUri, true); + } + + FlakeRef getFlakeRef() + { + if (flakeUri.find('/') != std::string::npos || flakeUri == ".") + return FlakeRef(flakeUri, true); + else + return FlakeRef(flakeUri); + } + + Flake getFlake() + { + auto evalState = getEvalState(); + return nix::getFlake(*evalState, getFlakeRef(), true); + } +}; + +struct CmdFlakeList : EvalCommand { std::string name() override { @@ -24,9 +50,7 @@ struct CmdFlakeList : StoreCommand, MixEvalArgs void run(nix::ref store) override { - auto evalState = std::make_shared(searchPath, store); - - 
auto registries = evalState->getFlakeRegistries(); + auto registries = getEvalState()->getFlakeRegistries(); stopProgressBar(); @@ -60,7 +84,7 @@ void printFlakeInfo(Flake & flake, bool json) { std::cout << "URI: " << flake.resolvedRef.to_string() << "\n"; std::cout << "Description: " << flake.description << "\n"; if (flake.resolvedRef.ref) - std::cout << "Branch: " << *flake.resolvedRef.ref; + std::cout << "Branch: " << *flake.resolvedRef.ref << "\n"; if (flake.resolvedRef.rev) std::cout << "Revision: " << flake.resolvedRef.rev->to_string(Base16, false) << "\n"; if (flake.revCount) @@ -95,7 +119,7 @@ void printNonFlakeInfo(NonFlake & nonFlake, bool json) { } } -struct CmdFlakeDeps : FlakeCommand, MixJSON, StoreCommand, MixEvalArgs +struct CmdFlakeDeps : FlakeCommand, MixJSON { std::string name() override { @@ -109,12 +133,10 @@ struct CmdFlakeDeps : FlakeCommand, MixJSON, StoreCommand, MixEvalArgs void run(nix::ref store) override { - auto evalState = std::make_shared(searchPath, store); + auto evalState = getEvalState(); evalState->addRegistryOverrides(registryOverrides); - FlakeRef flakeRef(flakeUri); - - ResolvedFlake resFlake = resolveFlake(*evalState, flakeRef, UpdateLockFile); + ResolvedFlake resFlake = resolveFlake(*evalState, getFlakeRef(), UpdateLockFile); std::queue todo; todo.push(resFlake); @@ -132,7 +154,7 @@ struct CmdFlakeDeps : FlakeCommand, MixJSON, StoreCommand, MixEvalArgs } }; -struct CmdFlakeUpdate : StoreCommand, FlakeCommand, MixEvalArgs +struct CmdFlakeUpdate : FlakeCommand { std::string name() override { @@ -146,14 +168,18 @@ struct CmdFlakeUpdate : StoreCommand, FlakeCommand, MixEvalArgs void run(nix::ref store) override { - auto evalState = std::make_shared(searchPath, store); + auto evalState = getEvalState(); - bool recreateLockFile = true; - updateLockFile(*evalState, flakeUri, recreateLockFile); + auto flakeRef = getFlakeRef(); + + if (std::get_if(&flakeRef.data)) + updateLockFile(*evalState, flakeRef, true); + else + throw Error("cannot update lockfile of flake '%s'", flakeRef); } }; -struct CmdFlakeInfo : FlakeCommand, MixJSON, MixEvalArgs, StoreCommand +struct CmdFlakeInfo : FlakeCommand, MixJSON { std::string name() override { @@ -169,8 +195,7 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON, MixEvalArgs, StoreCommand void run(nix::ref store) override { - auto evalState = std::make_shared(searchPath, store); - Flake flake = getFlake(*evalState, FlakeRef(flakeUri), true); + auto flake = getFlake(); printFlakeInfo(flake, json); } }; @@ -235,7 +260,7 @@ struct CmdFlakeRemove : virtual Args, MixEvalArgs, Command } }; -struct CmdFlakePin : virtual Args, StoreCommand, MixEvalArgs +struct CmdFlakePin : virtual Args, EvalCommand { FlakeUri alias; @@ -256,7 +281,7 @@ struct CmdFlakePin : virtual Args, StoreCommand, MixEvalArgs void run(nix::ref store) override { - auto evalState = std::make_shared(searchPath, store); + auto evalState = getEvalState(); Path userRegistryPath = getUserRegistryPath(); FlakeRegistry userRegistry = *readRegistry(userRegistryPath); @@ -307,7 +332,7 @@ struct CmdFlakeInit : virtual Args, Command } }; -struct CmdFlakeClone : StoreCommand, FlakeCommand, MixEvalArgs +struct CmdFlakeClone : FlakeCommand { Path endDirectory = ""; @@ -328,10 +353,10 @@ struct CmdFlakeClone : StoreCommand, FlakeCommand, MixEvalArgs void run(nix::ref store) override { - auto evalState = std::make_shared(searchPath, store); + auto evalState = getEvalState(); Registries registries = evalState->getFlakeRegistries(); - gitCloneFlake(flakeUri, *evalState, registries, 
endDirectory); + gitCloneFlake(getFlakeRef().to_string(), *evalState, registries, endDirectory); } }; diff --git a/src/nix/installables.cc b/src/nix/installables.cc index a2a55d949..85ef2cb56 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -38,7 +38,7 @@ SourceExprCommand::SourceExprCommand() .set(&noRegistries, true); } -ref SourceExprCommand::getEvalState() +ref EvalCommand::getEvalState() { if (!evalState) evalState = std::make_shared(searchPath, getStore()); From bc0fb109a946a1c3e125a5148280a0caba2d2c9a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 16 May 2019 23:14:27 +0200 Subject: [PATCH 137/634] Add some tests --- tests/flakes.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/flakes.sh b/tests/flakes.sh index 45c5f2048..5137bc39a 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -104,6 +104,11 @@ EOF # Test 'nix flake info'. nix flake info --flake-registry $registry flake1 | grep -q 'ID: *flake1' +# Test 'nix flake info' on a local flake. +(cd $flake1Dir && nix flake info) | grep -q 'ID: *flake1' +(cd $flake1Dir && nix flake info .) | grep -q 'ID: *flake1' +nix flake info $flake1Dir | grep -q 'ID: *flake1' + # Test 'nix flake info --json'. json=$(nix flake info --flake-registry $registry flake1 --json | jq .) [[ $(echo "$json" | jq -r .description) = 'Bla bla' ]] From 70136a9bf46bcf5a97b63f356fefd8adabf4c23b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 22 May 2019 13:46:07 +0200 Subject: [PATCH 138/634] Move flake-related flags into a separate class Also, rename --dont-save-lock-file to --no-save-lock-file and change noRegistries to useRegistries. --- src/libexpr/primops/flake.hh | 2 +- src/nix/command.hh | 21 ++++++++++------ src/nix/flake.cc | 18 ++++++++----- src/nix/installables.cc | 49 +++++++++++++++++++++--------------- 4 files changed, 56 insertions(+), 34 deletions(-) diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 677cdb7b7..8eaac9d96 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -64,7 +64,7 @@ typedef std::vector> Registries; Path getUserRegistryPath(); -enum HandleLockFile +enum HandleLockFile : unsigned int { AllPure // Everything is handled 100% purely , TopRefUsesRegistries // The top FlakeRef uses the registries, apart from that, everything happens 100% purely , UpdateLockFile // Update the existing lockfile and write it to file diff --git a/src/nix/command.hh b/src/nix/command.hh index 423ac5baa..a841b879a 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -11,8 +11,8 @@ extern std::string programPath; struct Value; class Bindings; class EvalState; - class Store; +enum HandleLockFile : unsigned int; /* A command that require a Nix store. 
*/ struct StoreCommand : virtual Command @@ -61,17 +61,24 @@ private: std::shared_ptr evalState; }; -struct SourceExprCommand : virtual Args, EvalCommand +struct MixFlakeOptions : virtual Args { - std::optional file; - - SourceExprCommand(); - bool recreateLockFile = false; bool saveLockFile = true; - bool noRegistries = false; + bool useRegistries = true; + + MixFlakeOptions(); + + HandleLockFile getLockFileMode(); +}; + +struct SourceExprCommand : virtual Args, EvalCommand, MixFlakeOptions +{ + std::optional file; + + SourceExprCommand(); std::vector> parseInstallables( ref store, std::vector ss); diff --git a/src/nix/flake.cc b/src/nix/flake.cc index bc2f1cb5b..b4f0f67be 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -10,7 +10,7 @@ using namespace nix; -class FlakeCommand : virtual Args, public EvalCommand +class FlakeCommand : virtual Args, public EvalCommand, public MixFlakeOptions { std::string flakeUri = "."; @@ -32,7 +32,12 @@ public: Flake getFlake() { auto evalState = getEvalState(); - return nix::getFlake(*evalState, getFlakeRef(), true); + return nix::getFlake(*evalState, getFlakeRef(), useRegistries); + } + + ResolvedFlake resolveFlake() + { + return nix::resolveFlake(*getEvalState(), getFlakeRef(), getLockFileMode()); } }; @@ -119,6 +124,7 @@ void printNonFlakeInfo(NonFlake & nonFlake, bool json) { } } +// FIXME: merge info CmdFlakeInfo? struct CmdFlakeDeps : FlakeCommand, MixJSON { std::string name() override @@ -136,7 +142,7 @@ struct CmdFlakeDeps : FlakeCommand, MixJSON auto evalState = getEvalState(); evalState->addRegistryOverrides(registryOverrides); - ResolvedFlake resFlake = resolveFlake(*evalState, getFlakeRef(), UpdateLockFile); + auto resFlake = resolveFlake(); std::queue todo; todo.push(resFlake); @@ -334,7 +340,7 @@ struct CmdFlakeInit : virtual Args, Command struct CmdFlakeClone : FlakeCommand { - Path endDirectory = ""; + Path destDir; std::string name() override { @@ -348,7 +354,7 @@ struct CmdFlakeClone : FlakeCommand CmdFlakeClone() { - expectArg("end-dir", &endDirectory, true); + expectArg("dest-dir", &destDir, true); } void run(nix::ref store) override @@ -356,7 +362,7 @@ struct CmdFlakeClone : FlakeCommand auto evalState = getEvalState(); Registries registries = evalState->getFlakeRegistries(); - gitCloneFlake(getFlakeRef().to_string(), *evalState, registries, endDirectory); + gitCloneFlake(getFlakeRef().to_string(), *evalState, registries, destDir); } }; diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 85ef2cb56..1a79f49fb 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -13,6 +13,34 @@ namespace nix { +MixFlakeOptions::MixFlakeOptions() +{ + mkFlag() + .longName("recreate-lock-file") + .description("recreate lock file from scratch") + .set(&recreateLockFile, true); + + mkFlag() + .longName("no-save-lock-file") + .description("do not save the newly generated lock file") + .set(&saveLockFile, false); + + mkFlag() + .longName("no-registries") + .description("don't use flake registries") + .set(&useRegistries, false); +} + +HandleLockFile MixFlakeOptions::getLockFileMode() +{ + return + useRegistries + ? recreateLockFile + ? (saveLockFile ? RecreateLockFile : UseNewLockFile) + : (saveLockFile ? 
UpdateLockFile : UseUpdatedLockFile) + : AllPure; +} + SourceExprCommand::SourceExprCommand() { mkFlag() @@ -21,21 +49,6 @@ SourceExprCommand::SourceExprCommand() .label("file") .description("evaluate a set of attributes from FILE (deprecated)") .dest(&file); - - mkFlag() - .longName("recreate-lock-file") - .description("recreate lock file from scratch") - .set(&recreateLockFile, true); - - mkFlag() - .longName("dont-save-lock-file") - .description("save the newly generated lock file") - .set(&saveLockFile, false); - - mkFlag() - .longName("no-registries") - .description("don't use flake registries") - .set(&noRegistries, true); } ref EvalCommand::getEvalState() @@ -169,11 +182,7 @@ struct InstallableFlake : InstallableValue { auto vFlake = state.allocValue(); - HandleLockFile handle = cmd.noRegistries ? AllPure : - cmd.recreateLockFile ? - (cmd.saveLockFile ? RecreateLockFile : UseNewLockFile) - : (cmd.saveLockFile ? UpdateLockFile : UseUpdatedLockFile); - makeFlakeValue(state, flakeRef, handle, *vFlake); + makeFlakeValue(state, flakeRef, cmd.getLockFileMode(), *vFlake); auto vProvides = (*vFlake->attrs->get(state.symbols.create("provides")))->value; From 3e8ef9eb22a0e36ef5ecaabf414b6edd46b87858 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 22 May 2019 13:57:19 +0200 Subject: [PATCH 139/634] nix flake deps: Print flake dependencies --- src/nix/flake.cc | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index b4f0f67be..ecbb3b81f 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -70,7 +70,7 @@ struct CmdFlakeList : EvalCommand } }; -void printFlakeInfo(Flake & flake, bool json) { +void printFlakeInfo(const Flake & flake, bool json) { if (json) { nlohmann::json j; j["id"] = flake.id; @@ -98,7 +98,7 @@ void printFlakeInfo(Flake & flake, bool json) { } } -void printNonFlakeInfo(NonFlake & nonFlake, bool json) { +void printNonFlakeInfo(const NonFlake & nonFlake, bool json) { if (json) { nlohmann::json j; j["id"] = nonFlake.alias; @@ -142,20 +142,20 @@ struct CmdFlakeDeps : FlakeCommand, MixJSON auto evalState = getEvalState(); evalState->addRegistryOverrides(registryOverrides); - auto resFlake = resolveFlake(); - std::queue todo; - todo.push(resFlake); + todo.push(resolveFlake()); while (!todo.empty()) { - resFlake = todo.front(); + auto resFlake = std::move(todo.front()); todo.pop(); - for (NonFlake & nonFlake : resFlake.nonFlakeDeps) + for (auto & nonFlake : resFlake.nonFlakeDeps) printNonFlakeInfo(nonFlake, json); - for (auto info : resFlake.flakeDeps) + for (auto & info : resFlake.flakeDeps) { + printFlakeInfo(info.second.flake, json); todo.push(info.second); + } } } }; From e414bde6f9f58e599d48307ff3cb0ab64cb47d9a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 22 May 2019 14:31:40 +0200 Subject: [PATCH 140/634] Check the flake epoch Closes #2883. 
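For illustration, a minimal top-level flake.nix that satisfies this check might
look as follows (a sketch only; the name and the provides body are placeholders,
not the values used in the tests, which alongside epoch are the attributes
getFlake() requires):

  {
    name = "example";

    # Epochs greater than 2019 are rejected with "requires unsupported epoch";
    # omitting the attribute entirely is also an error.
    epoch = 2019;

    provides = deps: {
    };
  }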
--- src/libexpr/primops/flake.cc | 11 +++++++++-- src/libexpr/primops/flake.hh | 3 ++- src/nix/flake.cc | 2 ++ tests/flakes.sh | 6 +++++- 4 files changed, 18 insertions(+), 4 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index c08c30c9c..9131080bf 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -309,10 +309,17 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe state.forceAttrs(vInfo); + if (auto epoch = vInfo.attrs->get(state.symbols.create("epoch"))) { + flake.epoch = state.forceInt(*(**epoch).value, *(**epoch).pos); + if (flake.epoch > 2019) + throw Error("flake '%s' requires unsupported epoch %d; please upgrade Nix", flakeRef, flake.epoch); + } else + throw Error("flake '%s' lacks attribute 'epoch'", flakeRef); + if (auto name = vInfo.attrs->get(state.sName)) flake.id = state.forceStringNoCtx(*(**name).value, *(**name).pos); else - throw Error("flake lacks attribute 'name'"); + throw Error("flake '%s' lacks attribute 'name'", flakeRef); if (auto description = vInfo.attrs->get(state.sDescription)) flake.description = state.forceStringNoCtx(*(**description).value, *(**description).pos); @@ -337,7 +344,7 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe state.forceFunction(*(**provides).value, *(**provides).pos); flake.vProvides = (**provides).value; } else - throw Error("flake lacks attribute 'provides'"); + throw Error("flake '%s' lacks attribute 'provides'", flakeRef); return flake; } diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 8eaac9d96..983c0eab6 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -101,7 +101,8 @@ struct Flake std::map nonFlakeRequires; Value * vProvides; // FIXME: gc // date - // content hash + unsigned int epoch; + Flake(const FlakeRef & origRef, const SourceInfo & sourceInfo) : originalRef(origRef), resolvedRef(sourceInfo.resolvedRef), revCount(sourceInfo.revCount), storePath(sourceInfo.storePath) {}; }; diff --git a/src/nix/flake.cc b/src/nix/flake.cc index ecbb3b81f..2dcdfc663 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -83,6 +83,7 @@ void printFlakeInfo(const Flake & flake, bool json) { if (flake.revCount) j["revCount"] = *flake.revCount; j["path"] = flake.storePath; + j["epoch"] = flake.epoch; std::cout << j.dump(4) << std::endl; } else { std::cout << "ID: " << flake.id << "\n"; @@ -95,6 +96,7 @@ void printFlakeInfo(const Flake & flake, bool json) { if (flake.revCount) std::cout << "Revcount: " << *flake.revCount << "\n"; std::cout << "Path: " << flake.storePath << "\n"; + std::cout << "Epoch: " << flake.epoch << "\n"; } } diff --git a/tests/flakes.sh b/tests/flakes.sh index 5137bc39a..6c987ad14 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -146,7 +146,7 @@ nix build -o $TEST_ROOT/result file://$flake2Dir:bar # Test whether indirect dependencies work. nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir:xyzzy -# Add dependency to flake3 +# Add dependency to flake3. 
rm $flake3Dir/flake.nix cat > $flake3Dir/flake.nix <&1 | grep 'unsupported epoch' From 7c7105e0f89fb7b0971fbe012296f811f245e798 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 22 May 2019 14:52:57 +0200 Subject: [PATCH 141/634] flake-registry: tweag/nix -> NixOS/nix --- flake-registry.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flake-registry.json b/flake-registry.json index 348bd010a..725bcef07 100644 --- a/flake-registry.json +++ b/flake-registry.json @@ -4,7 +4,7 @@ "uri": "github:edolstra/dwarffs/flake" }, "nix": { - "uri": "github:tweag/nix/flakes" + "uri": "github:NixOS/nix/flakes" }, "nixpkgs": { "uri": "github:edolstra/nixpkgs/release-19.03" From 66f1d7ee95ba693a15ae5dc413289fee954f0f04 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 22 May 2019 22:56:46 +0200 Subject: [PATCH 142/634] Fetch the flake registry from the NixOS/flake-registry repo --- flake-registry.json | 20 -------------------- local.mk | 2 -- src/libexpr/eval.cc | 8 -------- src/libexpr/eval.hh | 10 +++++----- src/libexpr/primops/flake.cc | 16 +++++++++++++--- src/libexpr/primops/flake.hh | 2 -- src/nix/flake.cc | 2 +- 7 files changed, 19 insertions(+), 41 deletions(-) delete mode 100644 flake-registry.json diff --git a/flake-registry.json b/flake-registry.json deleted file mode 100644 index 725bcef07..000000000 --- a/flake-registry.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "flakes": { - "dwarffs": { - "uri": "github:edolstra/dwarffs/flake" - }, - "nix": { - "uri": "github:NixOS/nix/flakes" - }, - "nixpkgs": { - "uri": "github:edolstra/nixpkgs/release-19.03" - }, - "hydra": { - "uri": "github:NixOS/hydra/flake" - }, - "patchelf": { - "uri": "github:NixOS/patchelf" - } - }, - "version": 1 -} diff --git a/local.mk b/local.mk index 11ed9c0a6..4b380176f 100644 --- a/local.mk +++ b/local.mk @@ -10,5 +10,3 @@ GLOBAL_CXXFLAGS += -I . -I src -I src/libutil -I src/libstore -I src/libmain -I $(foreach i, config.h $(call rwildcard, src/lib*, *.hh), \ $(eval $(call install-file-in, $(i), $(includedir)/nix, 0644))) - -$(eval $(call install-data-in,$(d)/flake-registry.json,$(datadir)/nix)) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 2789ea313..0f8a105b1 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -1978,14 +1978,6 @@ std::ostream & operator << (std::ostream & str, const ExternalValueBase & v) { EvalSettings evalSettings; -EvalSettings::EvalSettings() -{ - if (flakeRegistry == "") - // FIXME: static initialization order fiasco. But this will go - // away when we switch to an online registry. 
- flakeRegistry = settings.nixDataDir + "/nix/flake-registry.json"; -} - static GlobalConfig::Register r1(&evalSettings); diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index b0bf777fc..1e45bc1a8 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -325,9 +325,11 @@ public: const std::vector> getFlakeRegistries(); + std::shared_ptr getGlobalFlakeRegistry(); + private: - std::shared_ptr _flakeRegistry; - std::once_flag _flakeRegistryInit; + std::shared_ptr _globalFlakeRegistry; + std::once_flag _globalFlakeRegistryInit; }; @@ -368,10 +370,8 @@ struct EvalSettings : Config Setting allowedUris{this, {}, "allowed-uris", "Prefixes of URIs that builtin functions such as fetchurl and fetchGit are allowed to fetch."}; - Setting flakeRegistry{this, "", "flake-registry", + Setting flakeRegistry{this, "https://raw.githubusercontent.com/NixOS/flake-registry/master/flake-registry.json", "flake-registry", "Path or URI of the global flake registry."}; - - EvalSettings(); }; extern EvalSettings evalSettings; diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 9131080bf..c6c380118 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -131,9 +131,19 @@ void writeLockFile(const LockFile & lockFile, const Path & path) writeFile(path, json.dump(4) + "\n"); // '4' = indentation in json file } -std::shared_ptr getGlobalRegistry() +std::shared_ptr EvalState::getGlobalFlakeRegistry() { - return readRegistry(evalSettings.flakeRegistry); + std::call_once(_globalFlakeRegistryInit, [&]() { + auto path = evalSettings.flakeRegistry; + + if (!hasPrefix(path, "/")) + path = getDownloader()->downloadCached(store, + evalSettings.flakeRegistry, false, "registry").path; + + _globalFlakeRegistry = readRegistry(path); + }); + + return _globalFlakeRegistry; } Path getUserRegistryPath() @@ -162,7 +172,7 @@ const Registries EvalState::getFlakeRegistries() Registries registries; registries.push_back(getFlagRegistry(registryOverrides)); registries.push_back(getUserRegistry()); - registries.push_back(getGlobalRegistry()); + registries.push_back(getGlobalFlakeRegistry()); return registries; } diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 983c0eab6..b965aa9e7 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -120,8 +120,6 @@ struct NonFlake resolvedRef(sourceInfo.resolvedRef), revCount(sourceInfo.revCount), storePath(sourceInfo.storePath) {}; }; -std::shared_ptr getGlobalRegistry(); - Flake getFlake(EvalState &, const FlakeRef &, bool impureIsAllowed); struct ResolvedFlake diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 2dcdfc663..912b154c1 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -298,7 +298,7 @@ struct CmdFlakePin : virtual Args, EvalCommand it->second = getFlake(*evalState, it->second, true).resolvedRef; writeRegistry(userRegistry, userRegistryPath); } else { - std::shared_ptr globalReg = getGlobalRegistry(); + std::shared_ptr globalReg = evalState->getGlobalFlakeRegistry(); it = globalReg->entries.find(FlakeRef(alias)); if (it != globalReg->entries.end()) { FlakeRef newRef = getFlake(*evalState, it->second, true).resolvedRef; From df3f5a78d5ab0a1f2dc9d288b271b38a9b8b33b5 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 22 May 2019 23:36:29 +0200 Subject: [PATCH 143/634] Refactor downloadCached() interface --- src/libexpr/common-eval-args.cc | 8 +++--- src/libexpr/parser.y | 4 ++- src/libexpr/primops.cc | 22 ++++++++--------- src/libexpr/primops/flake.cc | 15 +++++++---- 
src/libstore/download.cc | 44 ++++++++++++++++----------------- src/libstore/download.hh | 20 +++++++++++---- src/nix-channel/nix-channel.cc | 14 +++++------ 7 files changed, 73 insertions(+), 54 deletions(-) diff --git a/src/libexpr/common-eval-args.cc b/src/libexpr/common-eval-args.cc index 8e94d358e..7c0d268bd 100644 --- a/src/libexpr/common-eval-args.cc +++ b/src/libexpr/common-eval-args.cc @@ -61,9 +61,11 @@ Bindings * MixEvalArgs::getAutoArgs(EvalState & state) Path lookupFileArg(EvalState & state, string s) { - if (isUri(s)) - return getDownloader()->downloadCached(state.store, s, true).path; - else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') { + if (isUri(s)) { + CachedDownloadRequest request(s); + request.unpack = true; + return getDownloader()->downloadCached(state.store, request).path; + } else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') { Path p = s.substr(1, s.size() - 2); return state.findFile(p); } else diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y index 75f04df3e..0cfe29c96 100644 --- a/src/libexpr/parser.y +++ b/src/libexpr/parser.y @@ -657,7 +657,9 @@ std::pair EvalState::resolveSearchPathElem(const SearchPathEl if (isUri(elem.second)) { try { - res = { true, getDownloader()->downloadCached(store, elem.second, true).path }; + CachedDownloadRequest request(elem.second); + request.unpack = true; + res = { true, getDownloader()->downloadCached(store, request).path }; } catch (DownloadError & e) { printError(format("warning: Nix search path entry '%1%' cannot be downloaded, ignoring") % elem.second); res = { false, "" }; diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 55a1bde11..070e72f3a 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -2050,9 +2050,9 @@ static void prim_splitVersion(EvalState & state, const Pos & pos, Value * * args void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v, const string & who, bool unpack, const std::string & defaultName) { - string url; - Hash expectedHash; - string name = defaultName; + CachedDownloadRequest request(""); + request.unpack = unpack; + request.name = defaultName; state.forceValue(*args[0]); @@ -2063,27 +2063,27 @@ void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v, for (auto & attr : *args[0]->attrs) { string n(attr.name); if (n == "url") - url = state.forceStringNoCtx(*attr.value, *attr.pos); + request.uri = state.forceStringNoCtx(*attr.value, *attr.pos); else if (n == "sha256") - expectedHash = Hash(state.forceStringNoCtx(*attr.value, *attr.pos), htSHA256); + request.expectedHash = Hash(state.forceStringNoCtx(*attr.value, *attr.pos), htSHA256); else if (n == "name") - name = state.forceStringNoCtx(*attr.value, *attr.pos); + request.name = state.forceStringNoCtx(*attr.value, *attr.pos); else throw EvalError(format("unsupported argument '%1%' to '%2%', at %3%") % attr.name % who % attr.pos); } - if (url.empty()) + if (request.uri.empty()) throw EvalError(format("'url' argument required, at %1%") % pos); } else - url = state.forceStringNoCtx(*args[0], pos); + request.uri = state.forceStringNoCtx(*args[0], pos); - state.checkURI(url); + state.checkURI(request.uri); - if (evalSettings.pureEval && !expectedHash) + if (evalSettings.pureEval && !request.expectedHash) throw Error("in pure evaluation mode, '%s' requires a 'sha256' argument", who); - Path res = getDownloader()->downloadCached(state.store, url, unpack, name, expectedHash).path; + Path res = getDownloader()->downloadCached(state.store, 
request).path; if (state.allowedPaths) state.allowedPaths->insert(res); diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index c6c380118..e2fdf08ca 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -136,9 +136,11 @@ std::shared_ptr EvalState::getGlobalFlakeRegistry() std::call_once(_globalFlakeRegistryInit, [&]() { auto path = evalSettings.flakeRegistry; - if (!hasPrefix(path, "/")) - path = getDownloader()->downloadCached(store, - evalSettings.flakeRegistry, false, "registry").path; + if (!hasPrefix(path, "/")) { + CachedDownloadRequest request(evalSettings.flakeRegistry); + request.name = "flake-registry.json"; + path = getDownloader()->downloadCached(store, request).path; + } _globalFlakeRegistry = readRegistry(path); }); @@ -244,8 +246,11 @@ static SourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, bool if (accessToken != "") url += "?access_token=" + accessToken; - auto result = getDownloader()->downloadCached(state.store, url, true, "source", - Hash(), nullptr, resolvedRef.rev ? 1000000000 : settings.tarballTtl); + CachedDownloadRequest request(url); + request.unpack = true; + request.name = "source"; + request.ttl = resolvedRef.rev ? 1000000000 : settings.tarballTtl; + auto result = getDownloader()->downloadCached(state.store, request); if (!result.etag) throw Error("did not receive an ETag header from '%s'", url); diff --git a/src/libstore/download.cc b/src/libstore/download.cc index 975cfd97d..a7c2600f6 100644 --- a/src/libstore/download.cc +++ b/src/libstore/download.cc @@ -319,10 +319,10 @@ struct CurlDownloader : public Downloader long httpStatus = 0; curl_easy_getinfo(req, CURLINFO_RESPONSE_CODE, &httpStatus); - char * effectiveUrlCStr; - curl_easy_getinfo(req, CURLINFO_EFFECTIVE_URL, &effectiveUrlCStr); - if (effectiveUrlCStr) - result.effectiveUrl = effectiveUrlCStr; + char * effectiveUriCStr; + curl_easy_getinfo(req, CURLINFO_EFFECTIVE_URL, &effectiveUriCStr); + if (effectiveUriCStr) + result.effectiveUri = effectiveUriCStr; debug("finished %s of '%s'; curl status = %d, HTTP status = %d, body = %d bytes", request.verb(), request.uri, code, httpStatus, result.bodySize); @@ -790,18 +790,20 @@ void Downloader::download(DownloadRequest && request, Sink & sink) } } -CachedDownloadResult Downloader::downloadCached(ref store, const string & url_, bool unpack, string name, const Hash & expectedHash, string * effectiveUrl, int ttl) +CachedDownloadResult Downloader::downloadCached( + ref store, const CachedDownloadRequest & request) { - auto url = resolveUri(url_); + auto url = resolveUri(request.uri); + auto name = request.name; if (name == "") { auto p = url.rfind('/'); if (p != string::npos) name = string(url, p + 1); } Path expectedStorePath; - if (expectedHash) { - expectedStorePath = store->makeFixedOutputPath(unpack, expectedHash, name); + if (request.expectedHash) { + expectedStorePath = store->makeFixedOutputPath(request.unpack, request.expectedHash, name); if (store->isValidPath(expectedStorePath)) { CachedDownloadResult result; result.storePath = expectedStorePath; @@ -835,10 +837,9 @@ CachedDownloadResult Downloader::downloadCached(ref store, const string & auto ss = tokenizeString>(readFile(dataFile), "\n"); if (ss.size() >= 3 && ss[0] == url) { time_t lastChecked; - if (string2Int(ss[2], lastChecked) && lastChecked + ttl >= time(0)) { + if (string2Int(ss[2], lastChecked) && lastChecked + request.ttl >= time(0)) { skip = true; - if (effectiveUrl) - *effectiveUrl = url_; + result.effectiveUri = 
request.uri; result.etag = ss[1]; } else if (!ss[1].empty()) { debug(format("verifying previous ETag '%1%'") % ss[1]); @@ -852,18 +853,17 @@ CachedDownloadResult Downloader::downloadCached(ref store, const string & if (!skip) { try { - DownloadRequest request(url); - request.expectedETag = expectedETag; - auto res = download(request); - if (effectiveUrl) - *effectiveUrl = res.effectiveUrl; + DownloadRequest request2(url); + request2.expectedETag = expectedETag; + auto res = download(request2); + result.effectiveUri = res.effectiveUri; result.etag = res.etag; if (!res.cached) { ValidPathInfo info; StringSink sink; dumpString(*res.data, sink); - Hash hash = hashString(expectedHash ? expectedHash.type : htSHA256, *res.data); + Hash hash = hashString(request.expectedHash ? request.expectedHash.type : htSHA256, *res.data); info.path = store->makeFixedOutputPath(false, hash, name); info.narHash = hashString(htSHA256, *sink.s); info.narSize = sink.s->size(); @@ -883,7 +883,7 @@ CachedDownloadResult Downloader::downloadCached(ref store, const string & } } - if (unpack) { + if (request.unpack) { Path unpackedLink = cacheDir + "/" + baseNameOf(storePath) + "-unpacked"; PathLocks lock2({unpackedLink}, fmt("waiting for lock on '%1%'...", unpackedLink)); Path unpackedStorePath; @@ -906,11 +906,11 @@ CachedDownloadResult Downloader::downloadCached(ref store, const string & } if (expectedStorePath != "" && storePath != expectedStorePath) { - Hash gotHash = unpack - ? hashPath(expectedHash.type, store->toRealPath(storePath)).first - : hashFile(expectedHash.type, store->toRealPath(storePath)); + Hash gotHash = request.unpack + ? hashPath(request.expectedHash.type, store->toRealPath(storePath)).first + : hashFile(request.expectedHash.type, store->toRealPath(storePath)); throw nix::Error("hash mismatch in file downloaded from '%s':\n wanted: %s\n got: %s", - url, expectedHash.to_string(), gotHash.to_string()); + url, request.expectedHash.to_string(), gotHash.to_string()); } result.storePath = storePath; diff --git a/src/libstore/download.hh b/src/libstore/download.hh index aa8c34be2..b676a1a7b 100644 --- a/src/libstore/download.hh +++ b/src/libstore/download.hh @@ -36,11 +36,23 @@ struct DownloadResult { bool cached = false; std::string etag; - std::string effectiveUrl; + std::string effectiveUri; std::shared_ptr data; uint64_t bodySize = 0; }; +struct CachedDownloadRequest +{ + std::string uri; + bool unpack = false; + std::string name; + Hash expectedHash; + unsigned int ttl = settings.tarballTtl; + + CachedDownloadRequest(const std::string & uri) + : uri(uri) { } +}; + struct CachedDownloadResult { // Note: 'storePath' may be different from 'path' when using a @@ -48,6 +60,7 @@ struct CachedDownloadResult Path storePath; Path path; std::optional etag; + std::string effectiveUri; }; class Store; @@ -73,10 +86,7 @@ struct Downloader and is more recent than ‘tarball-ttl’ seconds. Otherwise, use the recorded ETag to verify if the server has a more recent version, and if so, download it to the Nix store. 
*/ - CachedDownloadResult downloadCached( - ref store, const string & uri, bool unpack, string name = "", - const Hash & expectedHash = Hash(), string * effectiveUri = nullptr, - int ttl = settings.tarballTtl); + CachedDownloadResult downloadCached(ref store, const CachedDownloadRequest & request); enum Error { NotFound, Forbidden, Misc, Transient, Interrupted }; }; diff --git a/src/nix-channel/nix-channel.cc b/src/nix-channel/nix-channel.cc index 7b23088a2..bd1371dba 100755 --- a/src/nix-channel/nix-channel.cc +++ b/src/nix-channel/nix-channel.cc @@ -86,10 +86,12 @@ static void update(const StringSet & channelNames) // We want to download the url to a file to see if it's a tarball while also checking if we // got redirected in the process, so that we can grab the various parts of a nix channel // definition from a consistent location if the redirect changes mid-download. - std::string effectiveUrl; + CachedDownloadRequest request(url); + request.ttl = 0; auto dl = getDownloader(); - auto filename = dl->downloadCached(store, url, false, "", Hash(), &effectiveUrl, 0).path; - url = chomp(std::move(effectiveUrl)); + auto result = dl->downloadCached(store, request); + auto filename = result.path; + url = chomp(result.effectiveUri); // If the URL contains a version number, append it to the name // attribute (so that "nix-env -q" on the channels profile @@ -121,12 +123,10 @@ static void update(const StringSet & channelNames) } // Download the channel tarball. - auto fullURL = url + "/nixexprs.tar.xz"; try { - filename = dl->downloadCached(store, fullURL, false).path; + filename = dl->downloadCached(store, CachedDownloadRequest(url + "/nixexprs.tar.xz")).path; } catch (DownloadError & e) { - fullURL = url + "/nixexprs.tar.bz2"; - filename = dl->downloadCached(store, fullURL, false).path; + filename = dl->downloadCached(store, CachedDownloadRequest(url + "/nixexprs.tar.bz2")).path; } chomp(filename); } From f0d6d67af93b63c1da1809dc7630026624c19b14 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 22 May 2019 23:43:58 +0200 Subject: [PATCH 144/634] Prevent the global registry from being GC'ed Issue #2868. 
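The CachedDownloadRequest API introduced in the previous patch gains a gcRoot flag here, and downloadCached() registers the cached file as an indirect GC root when it is set. A minimal sketch of how a caller can use it, assuming the code is built as part of libstore; the function name fetchPinned and the hard-coded file name are illustrative and not part of this change:

    #include "download.hh"
    #include "store-api.hh"

    namespace nix {

    /* Fetch a URI through the download cache and keep the result alive
       across 'nix-store --gc', the same pattern getGlobalFlakeRegistry()
       uses in the diff below. */
    Path fetchPinned(ref<Store> store, const std::string & uri)
    {
        CachedDownloadRequest request(uri);
        request.name = "flake-registry.json";
        request.gcRoot = true;  // downloadCached() then calls addIndirectRoot() on the cache symlink
        return getDownloader()->downloadCached(store, request).path;
    }

    }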
--- src/libexpr/primops/flake.cc | 1 + src/libstore/download.cc | 3 +++ src/libstore/download.hh | 1 + 3 files changed, 5 insertions(+) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index e2fdf08ca..3c3d5e0c7 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -139,6 +139,7 @@ std::shared_ptr EvalState::getGlobalFlakeRegistry() if (!hasPrefix(path, "/")) { CachedDownloadRequest request(evalSettings.flakeRegistry); request.name = "flake-registry.json"; + request.gcRoot = true; path = getDownloader()->downloadCached(store, request).path; } diff --git a/src/libstore/download.cc b/src/libstore/download.cc index a7c2600f6..0d1974d3b 100644 --- a/src/libstore/download.cc +++ b/src/libstore/download.cc @@ -913,6 +913,9 @@ CachedDownloadResult Downloader::downloadCached( url, request.expectedHash.to_string(), gotHash.to_string()); } + if (request.gcRoot) + store->addIndirectRoot(fileLink); + result.storePath = storePath; result.path = store->toRealPath(storePath); return result; diff --git a/src/libstore/download.hh b/src/libstore/download.hh index b676a1a7b..404e51195 100644 --- a/src/libstore/download.hh +++ b/src/libstore/download.hh @@ -48,6 +48,7 @@ struct CachedDownloadRequest std::string name; Hash expectedHash; unsigned int ttl = settings.tarballTtl; + bool gcRoot = false; CachedDownloadRequest(const std::string & uri) : uri(uri) { } From a4ba6e5590285a78e51079be7664a459b2ae7ee4 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 22 May 2019 23:52:29 +0200 Subject: [PATCH 145/634] Add a test for the registry GC root --- tests/flakes.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/flakes.sh b/tests/flakes.sh index 6c987ad14..977728e43 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -178,3 +178,8 @@ nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir:sth # Unsupported epochs should be an error. sed -i $flake3Dir/flake.nix -e s/2019/2030/ nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir:sth 2>&1 | grep 'unsupported epoch' + +# Test whether registry caching works. +nix flake list --flake-registry file://$registry | grep -q flake3 +mv $registry $registry.tmp +nix flake list --flake-registry file://$registry --tarball-ttl 0 | grep -q flake3 From 013f4928c8a6dd21685531ceb631fbcff85cefef Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 23 May 2019 16:36:12 +0200 Subject: [PATCH 146/634] Fix tests https://hydra.nixos.org/eval/1521131 --- tests/flakes.sh | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/flakes.sh b/tests/flakes.sh index 977728e43..822c94e42 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -105,9 +105,9 @@ EOF nix flake info --flake-registry $registry flake1 | grep -q 'ID: *flake1' # Test 'nix flake info' on a local flake. -(cd $flake1Dir && nix flake info) | grep -q 'ID: *flake1' -(cd $flake1Dir && nix flake info .) | grep -q 'ID: *flake1' -nix flake info $flake1Dir | grep -q 'ID: *flake1' +(cd $flake1Dir && nix flake info --flake-registry $registry) | grep -q 'ID: *flake1' +(cd $flake1Dir && nix flake info --flake-registry $registry .) | grep -q 'ID: *flake1' +nix flake info --flake-registry $registry $flake1Dir | grep -q 'ID: *flake1' # Test 'nix flake info --json'. json=$(nix flake info --flake-registry $registry flake1 --json | jq .) @@ -141,7 +141,8 @@ nix build -o $TEST_ROOT/result --flake-registry $registry $flake2Dir:bar nix build -o $TEST_ROOT/result --flake-registry $registry flake2:bar # Or without a registry. 
-nix build -o $TEST_ROOT/result file://$flake2Dir:bar +# FIXME: shouldn't need '--flake-registry /no-registry'? +nix build -o $TEST_ROOT/result --flake-registry /no-registry file://$flake2Dir:bar # Test whether indirect dependencies work. nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir:xyzzy From 6e984431dd27326681a7cbb56404665c353c834a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 23 May 2019 23:38:40 +0200 Subject: [PATCH 147/634] fetchGit: Don't barf if we can't update our Git clone Instead print a warning that we're continuing with the most recently fetched version. --- src/libexpr/primops/fetchGit.cc | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index 00bbeb6d8..f6b096c4a 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -139,7 +139,12 @@ GitInfo exportGit(ref store, std::string uri, // FIXME: git stderr messes up our progress indicator, so // we're using --quiet for now. Should process its stderr. - runProgram("git", true, { "-C", repoDir, "fetch", "--quiet", "--force", "--", uri, fmt("%s:%s", *ref, *ref) }); + try { + runProgram("git", true, { "-C", repoDir, "fetch", "--quiet", "--force", "--", uri, fmt("%s:%s", *ref, *ref) }); + } catch (Error & e) { + if (!pathExists(localRefFile)) throw; + warn("could not update local clone of Git repository '%s'; continuing with the most recent version", uri); + } struct timeval times[2]; times[0].tv_sec = now; From 6b77bfc28d36d5b8111e23e15fbe4513a0797f47 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 23 May 2019 23:39:58 +0200 Subject: [PATCH 148/634] FlakeRef::to_string(): Check round trip --- src/libexpr/primops/flakeref.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index 3842b3f1a..c42616374 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -194,6 +194,8 @@ std::string FlakeRef::to_string() const else abort(); + assert(FlakeRef(string) == *this); + return string; } From 90fe1dfd2fd85f17609166c92a2163dfaa09ea99 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 23 May 2019 23:42:13 +0200 Subject: [PATCH 149/634] Register flake source trees as GC roots This ensures that flakes don't get garbage-collected, which is important to get nix-channel-like behaviour. For example, running $ nix build hydra: will create a GC root ~/.cache/nix/flake-closures/hydra -> /nix/store/xarfiqcwa4w8r4qpz1a769xxs8c3phgn-flake-closure where the contents/references of the linked file in the store are the flake source trees used by the 'hydra' flake: /nix/store/n6d5f5lkpfjbmkyby0nlg8y1wbkmbc7i-source /nix/store/vbkg4zy1qd29fnhflsv9k2j9jnbqd5m2-source /nix/store/z46xni7d47s5wk694359mq9ay353ar94-source Note that this in itself is not enough to allow offline use; the fetcher for the flakeref (e.g. fetchGit or downloadCached) must not fail if it cannot fetch the latest version of the file, so long as it knows a cached version. Issue #2868. 
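A rough sketch of the naming scheme used for these GC roots, assuming it lives next to makeFlakeClosureGCRoot() in src/nix/installables.cc; the helper name is illustrative and only mirrors the escaping done in the diff below:

    #include "util.hh"

    namespace nix {

    /* Map a flake ref such as "github:NixOS/hydra/flake" to the symlink
       path under ~/.cache/nix/flake-closures/ that pins its closure. */
    Path flakeClosureGCRootFor(const std::string & flakeRef)
    {
        auto s = flakeRef;
        s = replaceStrings(s, "%", "%25");  // escape '%' first so the encoding stays unambiguous
        s = replaceStrings(s, "/", "%2f");  // '/' would otherwise create subdirectories
        s = replaceStrings(s, ":", "%3a");
        return getCacheDir() + "/nix/flake-closures/" + s;
    }

    }

The store path the symlink points to is a small text file whose references are the flake source trees, so a single indirect root keeps the whole closure alive.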
--- src/libexpr/primops/flake.hh | 2 ++ src/nix/installables.cc | 45 +++++++++++++++++++++++++++++++++++- tests/flakes.sh | 19 +++++++++++++-- 3 files changed, 63 insertions(+), 3 deletions(-) diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index b965aa9e7..f85e62e7f 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -132,6 +132,8 @@ struct ResolvedFlake ResolvedFlake resolveFlake(EvalState &, const FlakeRef &, HandleLockFile); +void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v); + void updateLockFile(EvalState &, const FlakeRef & flakeRef, bool recreateLockFile); void gitCloneFlake(FlakeRef flakeRef, EvalState &, Registries, const Path & destDir); diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 1a79f49fb..6cab06240 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -10,6 +10,7 @@ #include "primops/flake.hh" #include +#include namespace nix { @@ -162,6 +163,44 @@ struct InstallableAttrPath : InstallableValue } }; +void makeFlakeClosureGCRoot(Store & store, const FlakeRef & origFlakeRef, const ResolvedFlake & resFlake) +{ + if (std::get_if(&origFlakeRef.data)) return; + + /* Get the store paths of all non-local flakes. */ + PathSet closure; + + std::queue> queue; + queue.push(resFlake); + + while (!queue.empty()) { + const ResolvedFlake & flake = queue.front(); + queue.pop(); + if (!std::get_if(&flake.flake.resolvedRef.data)) + closure.insert(flake.flake.storePath); + for (const auto & dep : flake.flakeDeps) + queue.push(dep.second); + } + + if (closure.empty()) return; + + /* Write the closure to a file in the store. */ + auto closurePath = store.addTextToStore("flake-closure", concatStringsSep(" ", closure), closure); + + Path cacheDir = getCacheDir() + "/nix/flake-closures"; + createDirs(cacheDir); + + auto s = origFlakeRef.to_string(); + assert(s[0] != '.'); + s = replaceStrings(s, "%", "%25"); + s = replaceStrings(s, "/", "%2f"); + s = replaceStrings(s, ":", "%3a"); + Path symlink = cacheDir + "/" + s; + debug("writing GC root '%s' for flake closure of '%s'", symlink, origFlakeRef); + replaceSymlink(closurePath, symlink); + store.addIndirectRoot(symlink); +} + struct InstallableFlake : InstallableValue { FlakeRef flakeRef; @@ -182,7 +221,11 @@ struct InstallableFlake : InstallableValue { auto vFlake = state.allocValue(); - makeFlakeValue(state, flakeRef, cmd.getLockFileMode(), *vFlake); + auto resFlake = resolveFlake(state, flakeRef, cmd.getLockFileMode()); + + callFlake(state, resFlake, *vFlake); + + makeFlakeClosureGCRoot(*state.store, flakeRef, resFlake); auto vProvides = (*vFlake->attrs->get(state.symbols.create("provides")))->value; diff --git a/tests/flakes.sh b/tests/flakes.sh index 822c94e42..179fef320 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -5,7 +5,10 @@ if [[ -z $(type -p git) ]]; then exit 99 fi +export _NIX_FORCE_HTTP=1 + clearStore +rm -rf $TEST_HOME/.cache registry=$TEST_ROOT/registry.json @@ -14,7 +17,7 @@ flake2Dir=$TEST_ROOT/flake2 flake3Dir=$TEST_ROOT/flake3 for repo in $flake1Dir $flake2Dir $flake3Dir; do - rm -rf $repo + rm -rf $repo $repo.tmp mkdir $repo git -C $repo init git -C $repo config user.email "foobar@example.com" @@ -142,7 +145,7 @@ nix build -o $TEST_ROOT/result --flake-registry $registry flake2:bar # Or without a registry. # FIXME: shouldn't need '--flake-registry /no-registry'? 
-nix build -o $TEST_ROOT/result --flake-registry /no-registry file://$flake2Dir:bar +nix build -o $TEST_ROOT/result --flake-registry /no-registry file://$flake2Dir:bar --tarball-ttl 0 # Test whether indirect dependencies work. nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir:xyzzy @@ -184,3 +187,15 @@ nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir:sth 2>&1 | nix flake list --flake-registry file://$registry | grep -q flake3 mv $registry $registry.tmp nix flake list --flake-registry file://$registry --tarball-ttl 0 | grep -q flake3 +mv $registry.tmp $registry + +# Test whether flakes are registered as GC roots for offline use. +rm -rf $TEST_HOME/.cache +nix build -o $TEST_ROOT/result --flake-registry file://$registry file://$flake2Dir:bar +mv $flake1Dir $flake1Dir.tmp +mv $flake2Dir $flake2Dir.tmp +nix-store --gc +nix build -o $TEST_ROOT/result --flake-registry file://$registry file://$flake2Dir:bar +nix build -o $TEST_ROOT/result --flake-registry file://$registry file://$flake2Dir:bar --tarball-ttl 0 +mv $flake1Dir.tmp $flake1Dir +mv $flake2Dir.tmp $flake2Dir From 638c56caeddbb10eeb7c636f7c0f4562ef6756e0 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Sun, 26 May 2019 15:59:50 +0200 Subject: [PATCH 150/634] Remove outdated fetchGit test It's no longer an error if we can't update our clone. --- tests/fetchGit.sh | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/fetchGit.sh b/tests/fetchGit.sh index d87ce8560..be46d24a7 100644 --- a/tests/fetchGit.sh +++ b/tests/fetchGit.sh @@ -50,9 +50,6 @@ path2=$(nix eval --impure --raw "(builtins.fetchGit file://$repo).outPath") [[ $(nix eval --impure "(builtins.fetchGit file://$repo).revCount") = 2 ]] [[ $(nix eval --impure --raw "(builtins.fetchGit file://$repo).rev") = $rev2 ]] -# But with TTL 0, it should fail. -(! nix eval --impure --tarball-ttl 0 "(builtins.fetchGit file://$repo)" -vvvvv) - # Fetching with a explicit hash should succeed. path2=$(nix eval --tarball-ttl 0 --raw "(builtins.fetchGit { url = file://$repo; rev = \"$rev2\"; }).outPath") [[ $path = $path2 ]] From 6fb7545fa19fe139305972020ee6ab189c97cd6e Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Tue, 28 May 2019 10:17:28 +0200 Subject: [PATCH 151/634] Fixed relative path parsing Fixed #2821 --- src/libexpr/primops/flakeref.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index c42616374..3c805eff8 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -134,7 +134,7 @@ FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) data = d; } - else if (hasPrefix(uri, "/") || (allowRelative && (hasPrefix(uri, "./") || uri == "."))) { + else if (hasPrefix(uri, "/") || (allowRelative && (hasPrefix(uri, "./") || hasPrefix(uri, "../") || uri == "."))) { IsPath d; d.path = allowRelative ? 
absPath(uri) : canonPath(uri); data = d; From 4d030a8d96a3baf7855d840f0ca475d43cbdb18a Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Tue, 28 May 2019 10:51:45 +0200 Subject: [PATCH 152/634] Added nonFlakeRequires test Fixes #2888 --- src/libexpr/primops/flake.cc | 6 +++--- tests/flakes.sh | 40 +++++++++++++++++++++++++++++++++++- 2 files changed, 42 insertions(+), 4 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 3c3d5e0c7..d82c2389d 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -366,9 +366,9 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe } // Get the `NonFlake` corresponding to a `FlakeRef`. -NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeAlias alias) +NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeAlias alias, bool impureIsAllowed = false) { - SourceInfo sourceInfo = fetchFlake(state, flakeRef); + SourceInfo sourceInfo = fetchFlake(state, flakeRef, impureIsAllowed); debug("got non-flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string()); FlakeRef resolvedRef = sourceInfo.resolvedRef; @@ -449,7 +449,7 @@ ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flake } else { if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries) throw Error("cannot update non-flake dependency '%s' in pure mode", nonFlakeInfo.first); - deps.nonFlakeDeps.push_back(getNonFlake(state, nonFlakeInfo.second, nonFlakeInfo.first)); + deps.nonFlakeDeps.push_back(getNonFlake(state, nonFlakeInfo.second, nonFlakeInfo.first, allowedToUseRegistries(handleLockFile, false))); } } diff --git a/tests/flakes.sh b/tests/flakes.sh index 179fef320..6081e8939 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -15,8 +15,9 @@ registry=$TEST_ROOT/registry.json flake1Dir=$TEST_ROOT/flake1 flake2Dir=$TEST_ROOT/flake2 flake3Dir=$TEST_ROOT/flake3 +nonFlakeDir=$TEST_ROOT/nonFlake -for repo in $flake1Dir $flake2Dir $flake3Dir; do +for repo in $flake1Dir $flake2Dir $flake3Dir $nonFlakeDir; do rm -rf $repo $repo.tmp mkdir $repo git -C $repo init @@ -81,6 +82,13 @@ EOF git -C $flake3Dir add flake.nix git -C $flake3Dir commit -m 'Initial' +cat > $nonFlakeDir/README.md < $registry < $flake3Dir/flake.nix < Date: Tue, 28 May 2019 12:58:28 +0200 Subject: [PATCH 153/634] Store SourceInfo in Flake and NonFlake This deduplicates some shared fields. Factoring out the commonality is useful in places like makeFlakeValue(). --- src/libexpr/primops/flake.cc | 21 +++++++++++---------- src/libexpr/primops/flake.hh | 14 +++++--------- src/nix/flake.cc | 24 ++++++++++++------------ src/nix/installables.cc | 2 +- 4 files changed, 29 insertions(+), 32 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index d82c2389d..a8e3e0859 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -368,19 +368,19 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe // Get the `NonFlake` corresponding to a `FlakeRef`. 
NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeAlias alias, bool impureIsAllowed = false) { - SourceInfo sourceInfo = fetchFlake(state, flakeRef, impureIsAllowed); + auto sourceInfo = fetchFlake(state, flakeRef, impureIsAllowed); debug("got non-flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string()); FlakeRef resolvedRef = sourceInfo.resolvedRef; NonFlake nonFlake(flakeRef, sourceInfo); - state.store->assertStorePath(nonFlake.storePath); + state.store->assertStorePath(nonFlake.sourceInfo.storePath); if (state.allowedPaths) - state.allowedPaths->insert(nonFlake.storePath); + state.allowedPaths->insert(nonFlake.sourceInfo.storePath); - nonFlake.hash = state.store->queryPathInfo(sourceInfo.storePath)->narHash; + nonFlake.hash = state.store->queryPathInfo(nonFlake.sourceInfo.storePath)->narHash; nonFlake.alias = alias; @@ -480,7 +480,7 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLoc if (!recreateLockFile (handleLockFile)) { // If recreateLockFile, start with an empty lockfile - oldLockFile = readLockFile(flake.storePath + "/flake.lock"); // FIXME: symlink attack + oldLockFile = readLockFile(flake.sourceInfo.storePath + "/flake.lock"); // FIXME: symlink attack } LockFile lockFile(oldLockFile); @@ -527,15 +527,16 @@ void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) auto vNonFlake = state.allocAttr(v, nonFlake.alias); state.mkAttrs(*vNonFlake, 4); - state.store->isValidPath(nonFlake.storePath); - mkString(*state.allocAttr(*vNonFlake, state.sOutPath), nonFlake.storePath, {nonFlake.storePath}); + state.store->isValidPath(nonFlake.sourceInfo.storePath); + mkString(*state.allocAttr(*vNonFlake, state.sOutPath), + nonFlake.sourceInfo.storePath, {nonFlake.sourceInfo.storePath}); // FIXME: add rev, shortRev, revCount, ... 
} mkString(*state.allocAttr(v, state.sDescription), resFlake.flake.description); - auto & path = resFlake.flake.storePath; + auto & path = resFlake.flake.sourceInfo.storePath; state.store->isValidPath(path); mkString(*state.allocAttr(v, state.sOutPath), path, {path}); @@ -546,8 +547,8 @@ void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) resFlake.flake.resolvedRef.rev->gitShortRev()); } - if (resFlake.flake.revCount) - mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *resFlake.flake.revCount); + if (resFlake.flake.sourceInfo.revCount) + mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *resFlake.flake.sourceInfo.revCount); auto vProvides = state.allocAttr(v, state.symbols.create("provides")); mkApp(*vProvides, *resFlake.flake.vProvides, v); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index f85e62e7f..46489c085 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -94,17 +94,15 @@ struct Flake FlakeRef originalRef; FlakeRef resolvedRef; std::string description; - std::optional revCount; - Path storePath; + SourceInfo sourceInfo; Hash hash; // content hash std::vector requires; std::map nonFlakeRequires; Value * vProvides; // FIXME: gc - // date unsigned int epoch; Flake(const FlakeRef & origRef, const SourceInfo & sourceInfo) : originalRef(origRef), - resolvedRef(sourceInfo.resolvedRef), revCount(sourceInfo.revCount), storePath(sourceInfo.storePath) {}; + resolvedRef(sourceInfo.resolvedRef), sourceInfo(sourceInfo) {}; }; struct NonFlake @@ -112,12 +110,10 @@ struct NonFlake FlakeAlias alias; FlakeRef originalRef; FlakeRef resolvedRef; - std::optional revCount; + SourceInfo sourceInfo; Hash hash; - Path storePath; - // date - NonFlake(const FlakeRef & origRef, const SourceInfo & sourceInfo) : originalRef(origRef), - resolvedRef(sourceInfo.resolvedRef), revCount(sourceInfo.revCount), storePath(sourceInfo.storePath) {}; + NonFlake(const FlakeRef & origRef, const SourceInfo & sourceInfo) : + originalRef(origRef), resolvedRef(sourceInfo.resolvedRef), sourceInfo(sourceInfo) {}; }; Flake getFlake(EvalState &, const FlakeRef &, bool impureIsAllowed); diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 912b154c1..71a6c16d9 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -80,9 +80,9 @@ void printFlakeInfo(const Flake & flake, bool json) { j["branch"] = *flake.resolvedRef.ref; if (flake.resolvedRef.rev) j["revision"] = flake.resolvedRef.rev->to_string(Base16, false); - if (flake.revCount) - j["revCount"] = *flake.revCount; - j["path"] = flake.storePath; + if (flake.sourceInfo.revCount) + j["revCount"] = *flake.sourceInfo.revCount; + j["path"] = flake.sourceInfo.storePath; j["epoch"] = flake.epoch; std::cout << j.dump(4) << std::endl; } else { @@ -93,9 +93,9 @@ void printFlakeInfo(const Flake & flake, bool json) { std::cout << "Branch: " << *flake.resolvedRef.ref << "\n"; if (flake.resolvedRef.rev) std::cout << "Revision: " << flake.resolvedRef.rev->to_string(Base16, false) << "\n"; - if (flake.revCount) - std::cout << "Revcount: " << *flake.revCount << "\n"; - std::cout << "Path: " << flake.storePath << "\n"; + if (flake.sourceInfo.revCount) + std::cout << "Revcount: " << *flake.sourceInfo.revCount << "\n"; + std::cout << "Path: " << flake.sourceInfo.storePath << "\n"; std::cout << "Epoch: " << flake.epoch << "\n"; } } @@ -109,9 +109,9 @@ void printNonFlakeInfo(const NonFlake & nonFlake, bool json) { j["branch"] = *nonFlake.resolvedRef.ref; if (nonFlake.resolvedRef.rev) j["revision"] = 
nonFlake.resolvedRef.rev->to_string(Base16, false); - if (nonFlake.revCount) - j["revCount"] = *nonFlake.revCount; - j["path"] = nonFlake.storePath; + if (nonFlake.sourceInfo.revCount) + j["revCount"] = *nonFlake.sourceInfo.revCount; + j["path"] = nonFlake.sourceInfo.storePath; std::cout << j.dump(4) << std::endl; } else { std::cout << "ID: " << nonFlake.alias << "\n"; @@ -120,9 +120,9 @@ void printNonFlakeInfo(const NonFlake & nonFlake, bool json) { std::cout << "Branch: " << *nonFlake.resolvedRef.ref; if (nonFlake.resolvedRef.rev) std::cout << "Revision: " << nonFlake.resolvedRef.rev->to_string(Base16, false) << "\n"; - if (nonFlake.revCount) - std::cout << "Revcount: " << *nonFlake.revCount << "\n"; - std::cout << "Path: " << nonFlake.storePath << "\n"; + if (nonFlake.sourceInfo.revCount) + std::cout << "Revcount: " << *nonFlake.sourceInfo.revCount << "\n"; + std::cout << "Path: " << nonFlake.sourceInfo.storePath << "\n"; } } diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 6cab06240..ce09a43d0 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -177,7 +177,7 @@ void makeFlakeClosureGCRoot(Store & store, const FlakeRef & origFlakeRef, const const ResolvedFlake & flake = queue.front(); queue.pop(); if (!std::get_if(&flake.flake.resolvedRef.data)) - closure.insert(flake.flake.storePath); + closure.insert(flake.flake.sourceInfo.storePath); for (const auto & dep : flake.flakeDeps) queue.push(dep.second); } From 894e0074453d58eb3e1902c3de3d483538e2a026 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 28 May 2019 13:07:15 +0200 Subject: [PATCH 154/634] Move hash into SourceInfo and rename to narHash to avoid ambiguity --- src/libexpr/primops/flake.cc | 14 +++++++------- src/libexpr/primops/flake.hh | 4 +--- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index a8e3e0859..ff3cd8184 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -263,6 +263,7 @@ static SourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, bool ref.rev = Hash(std::string(*result.etag, 1, result.etag->size() - 2), htSHA1); SourceInfo info(ref); info.storePath = result.storePath; + info.narHash = state.store->queryPathInfo(info.storePath)->narHash; return info; } @@ -276,6 +277,7 @@ static SourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, bool SourceInfo info(ref); info.storePath = gitInfo.storePath; info.revCount = gitInfo.revCount; + info.narHash = state.store->queryPathInfo(info.storePath)->narHash; return info; } @@ -289,6 +291,7 @@ static SourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, bool SourceInfo info(ref); info.storePath = gitInfo.storePath; info.revCount = gitInfo.revCount; + info.narHash = state.store->queryPathInfo(info.storePath)->narHash; return info; } @@ -315,7 +318,6 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe throw Error("'flake.nix' file of flake '%s' escapes from '%s'", resolvedRef, sourceInfo.storePath); Flake flake(flakeRef, sourceInfo); - flake.hash = state.store->queryPathInfo(sourceInfo.storePath)->narHash; if (!pathExists(realFlakeFile)) throw Error("source tree referenced by '%s' does not contain a '%s/flake.nix' file", resolvedRef, resolvedRef.subdir); @@ -380,8 +382,6 @@ NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeAlias al if (state.allowedPaths) state.allowedPaths->insert(nonFlake.sourceInfo.storePath); - nonFlake.hash = 
state.store->queryPathInfo(nonFlake.sourceInfo.storePath)->narHash; - nonFlake.alias = alias; return nonFlake; @@ -397,13 +397,13 @@ LockFile entryToLockFile(const LockFile::FlakeEntry & entry) LockFile::FlakeEntry dependenciesToFlakeEntry(const ResolvedFlake & resolvedFlake) { - LockFile::FlakeEntry entry(resolvedFlake.flake.resolvedRef, resolvedFlake.flake.hash); + LockFile::FlakeEntry entry(resolvedFlake.flake.resolvedRef, resolvedFlake.flake.sourceInfo.narHash); for (auto & info : resolvedFlake.flakeDeps) entry.flakeEntries.insert_or_assign(info.first.to_string(), dependenciesToFlakeEntry(info.second)); for (auto & nonFlake : resolvedFlake.nonFlakeDeps) { - LockFile::NonFlakeEntry nonEntry(nonFlake.resolvedRef, nonFlake.hash); + LockFile::NonFlakeEntry nonEntry(nonFlake.resolvedRef, nonFlake.sourceInfo.narHash); entry.nonFlakeEntries.insert_or_assign(nonFlake.alias, nonEntry); } @@ -443,7 +443,7 @@ ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flake auto i = lockFile.nonFlakeEntries.find(nonFlakeInfo.first); if (i != lockFile.nonFlakeEntries.end()) { NonFlake nonFlake = getNonFlake(state, i->second.ref, nonFlakeInfo.first); - if (nonFlake.hash != i->second.contentHash) + if (nonFlake.sourceInfo.narHash != i->second.contentHash) throw Error("the content hash of flakeref '%s' doesn't match", i->second.ref.to_string()); deps.nonFlakeDeps.push_back(nonFlake); } else { @@ -457,7 +457,7 @@ ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flake auto i = lockFile.flakeEntries.find(newFlakeRef); if (i != lockFile.flakeEntries.end()) { // Propagate lockFile downwards if possible ResolvedFlake newResFlake = resolveFlakeFromLockFile(state, i->second.ref, handleLockFile, entryToLockFile(i->second)); - if (newResFlake.flake.hash != i->second.contentHash) + if (newResFlake.flake.sourceInfo.narHash != i->second.contentHash) throw Error("the content hash of flakeref '%s' doesn't match", i->second.ref.to_string()); deps.flakeDeps.insert_or_assign(newFlakeRef, newResFlake); } else { diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 46489c085..760d66057 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -84,7 +84,7 @@ struct SourceInfo FlakeRef resolvedRef; Path storePath; std::optional revCount; - // date + Hash narHash; // store path hash SourceInfo(const FlakeRef & resolvRef) : resolvedRef(resolvRef) {}; }; @@ -95,7 +95,6 @@ struct Flake FlakeRef resolvedRef; std::string description; SourceInfo sourceInfo; - Hash hash; // content hash std::vector requires; std::map nonFlakeRequires; Value * vProvides; // FIXME: gc @@ -111,7 +110,6 @@ struct NonFlake FlakeRef originalRef; FlakeRef resolvedRef; SourceInfo sourceInfo; - Hash hash; NonFlake(const FlakeRef & origRef, const SourceInfo & sourceInfo) : originalRef(origRef), resolvedRef(sourceInfo.resolvedRef), sourceInfo(sourceInfo) {}; }; From 48463045415d6099b01e360d27f3cdd1053c9ed0 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 28 May 2019 13:08:40 +0200 Subject: [PATCH 155/634] Rename contentHash -> narHash for consistency --- src/libexpr/primops/flake.cc | 10 +++++----- src/libexpr/primops/flake.hh | 12 ++++++------ 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index ff3cd8184..d04f665a8 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -105,10 +105,10 @@ nlohmann::json flakeEntryToJson(const LockFile::FlakeEntry & entry) { 
nlohmann::json json; json["uri"] = entry.ref.to_string(); - json["contentHash"] = entry.contentHash.to_string(SRI); + json["contentHash"] = entry.narHash.to_string(SRI); for (auto & x : entry.nonFlakeEntries) { json["nonFlakeRequires"][x.first]["uri"] = x.second.ref.to_string(); - json["nonFlakeRequires"][x.first]["contentHash"] = x.second.contentHash.to_string(SRI); + json["nonFlakeRequires"][x.first]["contentHash"] = x.second.narHash.to_string(SRI); } for (auto & x : entry.flakeEntries) json["requires"][x.first.to_string()] = flakeEntryToJson(x.second); @@ -122,7 +122,7 @@ void writeLockFile(const LockFile & lockFile, const Path & path) json["nonFlakeRequires"] = nlohmann::json::object(); for (auto & x : lockFile.nonFlakeEntries) { json["nonFlakeRequires"][x.first]["uri"] = x.second.ref.to_string(); - json["nonFlakeRequires"][x.first]["contentHash"] = x.second.contentHash.to_string(SRI); + json["nonFlakeRequires"][x.first]["contentHash"] = x.second.narHash.to_string(SRI); } json["requires"] = nlohmann::json::object(); for (auto & x : lockFile.flakeEntries) @@ -443,7 +443,7 @@ ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flake auto i = lockFile.nonFlakeEntries.find(nonFlakeInfo.first); if (i != lockFile.nonFlakeEntries.end()) { NonFlake nonFlake = getNonFlake(state, i->second.ref, nonFlakeInfo.first); - if (nonFlake.sourceInfo.narHash != i->second.contentHash) + if (nonFlake.sourceInfo.narHash != i->second.narHash) throw Error("the content hash of flakeref '%s' doesn't match", i->second.ref.to_string()); deps.nonFlakeDeps.push_back(nonFlake); } else { @@ -457,7 +457,7 @@ ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flake auto i = lockFile.flakeEntries.find(newFlakeRef); if (i != lockFile.flakeEntries.end()) { // Propagate lockFile downwards if possible ResolvedFlake newResFlake = resolveFlakeFromLockFile(state, i->second.ref, handleLockFile, entryToLockFile(i->second)); - if (newResFlake.flake.sourceInfo.narHash != i->second.contentHash) + if (newResFlake.flake.sourceInfo.narHash != i->second.narHash) throw Error("the content hash of flakeref '%s' doesn't match", i->second.ref.to_string()); deps.flakeDeps.insert_or_assign(newFlakeRef, newResFlake); } else { diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 760d66057..d90404ae6 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -22,28 +22,28 @@ struct LockFile struct NonFlakeEntry { FlakeRef ref; - Hash contentHash; - NonFlakeEntry(const FlakeRef & flakeRef, const Hash & hash) : ref(flakeRef), contentHash(hash) {}; + Hash narHash; + NonFlakeEntry(const FlakeRef & flakeRef, const Hash & hash) : ref(flakeRef), narHash(hash) {}; bool operator ==(const NonFlakeEntry & other) const { - return ref == other.ref && contentHash == other.contentHash; + return ref == other.ref && narHash == other.narHash; } }; struct FlakeEntry { FlakeRef ref; - Hash contentHash; + Hash narHash; std::map flakeEntries; std::map nonFlakeEntries; - FlakeEntry(const FlakeRef & flakeRef, const Hash & hash) : ref(flakeRef), contentHash(hash) {}; + FlakeEntry(const FlakeRef & flakeRef, const Hash & hash) : ref(flakeRef), narHash(hash) {}; bool operator ==(const FlakeEntry & other) const { return ref == other.ref - && contentHash == other.contentHash + && narHash == other.narHash && flakeEntries == other.flakeEntries && nonFlakeEntries == other.nonFlakeEntries; } From dda4f7167b9a421f1bb85ee5eed79a1bf03a13e4 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: 
Tue, 28 May 2019 13:12:43 +0200 Subject: [PATCH 156/634] Remove redundant resolvedRef fields since they're already in SourceInfo --- src/libexpr/primops/flake.cc | 14 ++++++++---- src/libexpr/primops/flake.hh | 10 ++++---- src/nix/flake.cc | 44 ++++++++++++++++++------------------ src/nix/installables.cc | 2 +- 4 files changed, 36 insertions(+), 34 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index d04f665a8..4de742862 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -397,13 +397,17 @@ LockFile entryToLockFile(const LockFile::FlakeEntry & entry) LockFile::FlakeEntry dependenciesToFlakeEntry(const ResolvedFlake & resolvedFlake) { - LockFile::FlakeEntry entry(resolvedFlake.flake.resolvedRef, resolvedFlake.flake.sourceInfo.narHash); + LockFile::FlakeEntry entry( + resolvedFlake.flake.sourceInfo.resolvedRef, + resolvedFlake.flake.sourceInfo.narHash); for (auto & info : resolvedFlake.flakeDeps) entry.flakeEntries.insert_or_assign(info.first.to_string(), dependenciesToFlakeEntry(info.second)); for (auto & nonFlake : resolvedFlake.nonFlakeDeps) { - LockFile::NonFlakeEntry nonEntry(nonFlake.resolvedRef, nonFlake.sourceInfo.narHash); + LockFile::NonFlakeEntry nonEntry( + nonFlake.sourceInfo.resolvedRef, + nonFlake.sourceInfo.narHash); entry.nonFlakeEntries.insert_or_assign(nonFlake.alias, nonEntry); } @@ -540,11 +544,11 @@ void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) state.store->isValidPath(path); mkString(*state.allocAttr(v, state.sOutPath), path, {path}); - if (resFlake.flake.resolvedRef.rev) { + if (resFlake.flake.sourceInfo.resolvedRef.rev) { mkString(*state.allocAttr(v, state.symbols.create("rev")), - resFlake.flake.resolvedRef.rev->gitRev()); + resFlake.flake.sourceInfo.resolvedRef.rev->gitRev()); mkString(*state.allocAttr(v, state.symbols.create("shortRev")), - resFlake.flake.resolvedRef.rev->gitShortRev()); + resFlake.flake.sourceInfo.resolvedRef.rev->gitShortRev()); } if (resFlake.flake.sourceInfo.revCount) diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index d90404ae6..a26103736 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -92,7 +92,6 @@ struct Flake { FlakeId id; FlakeRef originalRef; - FlakeRef resolvedRef; std::string description; SourceInfo sourceInfo; std::vector requires; @@ -100,18 +99,17 @@ struct Flake Value * vProvides; // FIXME: gc unsigned int epoch; - Flake(const FlakeRef & origRef, const SourceInfo & sourceInfo) : originalRef(origRef), - resolvedRef(sourceInfo.resolvedRef), sourceInfo(sourceInfo) {}; + Flake(const FlakeRef & origRef, const SourceInfo & sourceInfo) + : originalRef(origRef), sourceInfo(sourceInfo) {}; }; struct NonFlake { FlakeAlias alias; FlakeRef originalRef; - FlakeRef resolvedRef; SourceInfo sourceInfo; - NonFlake(const FlakeRef & origRef, const SourceInfo & sourceInfo) : - originalRef(origRef), resolvedRef(sourceInfo.resolvedRef), sourceInfo(sourceInfo) {}; + NonFlake(const FlakeRef & origRef, const SourceInfo & sourceInfo) + : originalRef(origRef), sourceInfo(sourceInfo) {}; }; Flake getFlake(EvalState &, const FlakeRef &, bool impureIsAllowed); diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 71a6c16d9..ce5ce8742 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -74,12 +74,12 @@ void printFlakeInfo(const Flake & flake, bool json) { if (json) { nlohmann::json j; j["id"] = flake.id; - j["uri"] = flake.resolvedRef.to_string(); + j["uri"] = flake.sourceInfo.resolvedRef.to_string(); 
j["description"] = flake.description; - if (flake.resolvedRef.ref) - j["branch"] = *flake.resolvedRef.ref; - if (flake.resolvedRef.rev) - j["revision"] = flake.resolvedRef.rev->to_string(Base16, false); + if (flake.sourceInfo.resolvedRef.ref) + j["branch"] = *flake.sourceInfo.resolvedRef.ref; + if (flake.sourceInfo.resolvedRef.rev) + j["revision"] = flake.sourceInfo.resolvedRef.rev->to_string(Base16, false); if (flake.sourceInfo.revCount) j["revCount"] = *flake.sourceInfo.revCount; j["path"] = flake.sourceInfo.storePath; @@ -87,12 +87,12 @@ void printFlakeInfo(const Flake & flake, bool json) { std::cout << j.dump(4) << std::endl; } else { std::cout << "ID: " << flake.id << "\n"; - std::cout << "URI: " << flake.resolvedRef.to_string() << "\n"; + std::cout << "URI: " << flake.sourceInfo.resolvedRef.to_string() << "\n"; std::cout << "Description: " << flake.description << "\n"; - if (flake.resolvedRef.ref) - std::cout << "Branch: " << *flake.resolvedRef.ref << "\n"; - if (flake.resolvedRef.rev) - std::cout << "Revision: " << flake.resolvedRef.rev->to_string(Base16, false) << "\n"; + if (flake.sourceInfo.resolvedRef.ref) + std::cout << "Branch: " << *flake.sourceInfo.resolvedRef.ref << "\n"; + if (flake.sourceInfo.resolvedRef.rev) + std::cout << "Revision: " << flake.sourceInfo.resolvedRef.rev->to_string(Base16, false) << "\n"; if (flake.sourceInfo.revCount) std::cout << "Revcount: " << *flake.sourceInfo.revCount << "\n"; std::cout << "Path: " << flake.sourceInfo.storePath << "\n"; @@ -104,22 +104,22 @@ void printNonFlakeInfo(const NonFlake & nonFlake, bool json) { if (json) { nlohmann::json j; j["id"] = nonFlake.alias; - j["uri"] = nonFlake.resolvedRef.to_string(); - if (nonFlake.resolvedRef.ref) - j["branch"] = *nonFlake.resolvedRef.ref; - if (nonFlake.resolvedRef.rev) - j["revision"] = nonFlake.resolvedRef.rev->to_string(Base16, false); + j["uri"] = nonFlake.sourceInfo.resolvedRef.to_string(); + if (nonFlake.sourceInfo.resolvedRef.ref) + j["branch"] = *nonFlake.sourceInfo.resolvedRef.ref; + if (nonFlake.sourceInfo.resolvedRef.rev) + j["revision"] = nonFlake.sourceInfo.resolvedRef.rev->to_string(Base16, false); if (nonFlake.sourceInfo.revCount) j["revCount"] = *nonFlake.sourceInfo.revCount; j["path"] = nonFlake.sourceInfo.storePath; std::cout << j.dump(4) << std::endl; } else { std::cout << "ID: " << nonFlake.alias << "\n"; - std::cout << "URI: " << nonFlake.resolvedRef.to_string() << "\n"; - if (nonFlake.resolvedRef.ref) - std::cout << "Branch: " << *nonFlake.resolvedRef.ref; - if (nonFlake.resolvedRef.rev) - std::cout << "Revision: " << nonFlake.resolvedRef.rev->to_string(Base16, false) << "\n"; + std::cout << "URI: " << nonFlake.sourceInfo.resolvedRef.to_string() << "\n"; + if (nonFlake.sourceInfo.resolvedRef.ref) + std::cout << "Branch: " << *nonFlake.sourceInfo.resolvedRef.ref; + if (nonFlake.sourceInfo.resolvedRef.rev) + std::cout << "Revision: " << nonFlake.sourceInfo.resolvedRef.rev->to_string(Base16, false) << "\n"; if (nonFlake.sourceInfo.revCount) std::cout << "Revcount: " << *nonFlake.sourceInfo.revCount << "\n"; std::cout << "Path: " << nonFlake.sourceInfo.storePath << "\n"; @@ -295,13 +295,13 @@ struct CmdFlakePin : virtual Args, EvalCommand FlakeRegistry userRegistry = *readRegistry(userRegistryPath); auto it = userRegistry.entries.find(FlakeRef(alias)); if (it != userRegistry.entries.end()) { - it->second = getFlake(*evalState, it->second, true).resolvedRef; + it->second = getFlake(*evalState, it->second, true).sourceInfo.resolvedRef; writeRegistry(userRegistry, 
userRegistryPath); } else { std::shared_ptr globalReg = evalState->getGlobalFlakeRegistry(); it = globalReg->entries.find(FlakeRef(alias)); if (it != globalReg->entries.end()) { - FlakeRef newRef = getFlake(*evalState, it->second, true).resolvedRef; + auto newRef = getFlake(*evalState, it->second, true).sourceInfo.resolvedRef; userRegistry.entries.insert_or_assign(alias, newRef); writeRegistry(userRegistry, userRegistryPath); } else diff --git a/src/nix/installables.cc b/src/nix/installables.cc index ce09a43d0..4f9161666 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -176,7 +176,7 @@ void makeFlakeClosureGCRoot(Store & store, const FlakeRef & origFlakeRef, const while (!queue.empty()) { const ResolvedFlake & flake = queue.front(); queue.pop(); - if (!std::get_if(&flake.flake.resolvedRef.data)) + if (!std::get_if(&flake.flake.sourceInfo.resolvedRef.data)) closure.insert(flake.flake.sourceInfo.storePath); for (const auto & dep : flake.flakeDeps) queue.push(dep.second); From 25e497bf9c3a848afbd7463f132c9229d8a44284 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 28 May 2019 13:14:27 +0200 Subject: [PATCH 157/634] nix flake info/deps: Stop progress bar before printing output --- src/nix/flake.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index ce5ce8742..66fa11e47 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -147,6 +147,8 @@ struct CmdFlakeDeps : FlakeCommand, MixJSON std::queue todo; todo.push(resolveFlake()); + stopProgressBar(); + while (!todo.empty()) { auto resFlake = std::move(todo.front()); todo.pop(); @@ -204,6 +206,7 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON void run(nix::ref store) override { auto flake = getFlake(); + stopProgressBar(); printFlakeInfo(flake, json); } }; From fdf06ce72f9695f57f6215683e4b2e1c6ec463cf Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 28 May 2019 13:21:06 +0200 Subject: [PATCH 158/634] printFlakeInfo/printNonFlakeInfo: Factor out commonality --- src/nix/flake.cc | 66 +++++++++++++++++++++++------------------------- 1 file changed, 32 insertions(+), 34 deletions(-) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 66fa11e47..da19caa5a 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -70,59 +70,57 @@ struct CmdFlakeList : EvalCommand } }; -void printFlakeInfo(const Flake & flake, bool json) { +static void printSourceInfo(const SourceInfo & sourceInfo) +{ + std::cout << "URI: " << sourceInfo.resolvedRef.to_string() << "\n"; + if (sourceInfo.resolvedRef.ref) + std::cout << "Branch: " << *sourceInfo.resolvedRef.ref; + if (sourceInfo.resolvedRef.rev) + std::cout << "Revision: " << sourceInfo.resolvedRef.rev->to_string(Base16, false) << "\n"; + if (sourceInfo.revCount) + std::cout << "Revcount: " << *sourceInfo.revCount << "\n"; + std::cout << "Path: " << sourceInfo.storePath << "\n"; +} + +static void sourceInfoToJson(const SourceInfo & sourceInfo, nlohmann::json & j) +{ + j["uri"] = sourceInfo.resolvedRef.to_string(); + if (sourceInfo.resolvedRef.ref) + j["branch"] = *sourceInfo.resolvedRef.ref; + if (sourceInfo.resolvedRef.rev) + j["revision"] = sourceInfo.resolvedRef.rev->to_string(Base16, false); + if (sourceInfo.revCount) + j["revCount"] = *sourceInfo.revCount; + j["path"] = sourceInfo.storePath; +} + +static void printFlakeInfo(const Flake & flake, bool json) +{ if (json) { nlohmann::json j; j["id"] = flake.id; - j["uri"] = flake.sourceInfo.resolvedRef.to_string(); j["description"] = flake.description; - if (flake.sourceInfo.resolvedRef.ref) - 
j["branch"] = *flake.sourceInfo.resolvedRef.ref; - if (flake.sourceInfo.resolvedRef.rev) - j["revision"] = flake.sourceInfo.resolvedRef.rev->to_string(Base16, false); - if (flake.sourceInfo.revCount) - j["revCount"] = *flake.sourceInfo.revCount; - j["path"] = flake.sourceInfo.storePath; j["epoch"] = flake.epoch; + sourceInfoToJson(flake.sourceInfo, j); std::cout << j.dump(4) << std::endl; } else { std::cout << "ID: " << flake.id << "\n"; - std::cout << "URI: " << flake.sourceInfo.resolvedRef.to_string() << "\n"; std::cout << "Description: " << flake.description << "\n"; - if (flake.sourceInfo.resolvedRef.ref) - std::cout << "Branch: " << *flake.sourceInfo.resolvedRef.ref << "\n"; - if (flake.sourceInfo.resolvedRef.rev) - std::cout << "Revision: " << flake.sourceInfo.resolvedRef.rev->to_string(Base16, false) << "\n"; - if (flake.sourceInfo.revCount) - std::cout << "Revcount: " << *flake.sourceInfo.revCount << "\n"; - std::cout << "Path: " << flake.sourceInfo.storePath << "\n"; std::cout << "Epoch: " << flake.epoch << "\n"; + printSourceInfo(flake.sourceInfo); } } -void printNonFlakeInfo(const NonFlake & nonFlake, bool json) { +static void printNonFlakeInfo(const NonFlake & nonFlake, bool json) +{ if (json) { nlohmann::json j; j["id"] = nonFlake.alias; - j["uri"] = nonFlake.sourceInfo.resolvedRef.to_string(); - if (nonFlake.sourceInfo.resolvedRef.ref) - j["branch"] = *nonFlake.sourceInfo.resolvedRef.ref; - if (nonFlake.sourceInfo.resolvedRef.rev) - j["revision"] = nonFlake.sourceInfo.resolvedRef.rev->to_string(Base16, false); - if (nonFlake.sourceInfo.revCount) - j["revCount"] = *nonFlake.sourceInfo.revCount; - j["path"] = nonFlake.sourceInfo.storePath; + printSourceInfo(nonFlake.sourceInfo); std::cout << j.dump(4) << std::endl; } else { std::cout << "ID: " << nonFlake.alias << "\n"; - std::cout << "URI: " << nonFlake.sourceInfo.resolvedRef.to_string() << "\n"; - if (nonFlake.sourceInfo.resolvedRef.ref) - std::cout << "Branch: " << *nonFlake.sourceInfo.resolvedRef.ref; - if (nonFlake.sourceInfo.resolvedRef.rev) - std::cout << "Revision: " << nonFlake.sourceInfo.resolvedRef.rev->to_string(Base16, false) << "\n"; - if (nonFlake.sourceInfo.revCount) - std::cout << "Revcount: " << *nonFlake.sourceInfo.revCount << "\n"; - std::cout << "Path: " << nonFlake.sourceInfo.storePath << "\n"; + printSourceInfo(nonFlake.sourceInfo); } } From e7e7a03baf446bad34939c106aee4b69f5619fd0 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 28 May 2019 13:22:11 +0200 Subject: [PATCH 159/634] nix flake deps: Remove --json flag for now It doesn't produce valid JSON at the moment (but a concatenation of JSON objects). Anyway we probably should merge this command info 'nix flake info'. --- src/nix/flake.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index da19caa5a..810529613 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -125,7 +125,7 @@ static void printNonFlakeInfo(const NonFlake & nonFlake, bool json) } // FIXME: merge info CmdFlakeInfo? 
-struct CmdFlakeDeps : FlakeCommand, MixJSON +struct CmdFlakeDeps : FlakeCommand { std::string name() override { @@ -152,10 +152,10 @@ struct CmdFlakeDeps : FlakeCommand, MixJSON todo.pop(); for (auto & nonFlake : resFlake.nonFlakeDeps) - printNonFlakeInfo(nonFlake, json); + printNonFlakeInfo(nonFlake, false); for (auto & info : resFlake.flakeDeps) { - printFlakeInfo(info.second.flake, json); + printFlakeInfo(info.second.flake, false); todo.push(info.second); } } From ecee759b80a5a4cefbf15c3670f28bdebb58c404 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 28 May 2019 14:01:08 +0200 Subject: [PATCH 160/634] callFlake(): Emit source info attributes for non-flake dependencies --- src/libexpr/primops/flake.cc | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 4de742862..162e5c915 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -514,6 +514,23 @@ void updateLockFile(EvalState & state, const FlakeRef & flakeRef, bool recreateL resolveFlake(state, flakeRef, recreateLockFile ? RecreateLockFile : UpdateLockFile); } +static void emitSourceInfoAttrs(EvalState & state, const SourceInfo & sourceInfo, Value & vAttrs) +{ + auto & path = sourceInfo.storePath; + state.store->isValidPath(path); + mkString(*state.allocAttr(vAttrs, state.sOutPath), path, {path}); + + if (sourceInfo.resolvedRef.rev) { + mkString(*state.allocAttr(vAttrs, state.symbols.create("rev")), + sourceInfo.resolvedRef.rev->gitRev()); + mkString(*state.allocAttr(vAttrs, state.symbols.create("shortRev")), + sourceInfo.resolvedRef.rev->gitShortRev()); + } + + if (sourceInfo.revCount) + mkInt(*state.allocAttr(vAttrs, state.symbols.create("revCount")), *sourceInfo.revCount); +} + void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) { // Construct the resulting attrset '{description, provides, @@ -529,30 +546,18 @@ void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) for (const NonFlake nonFlake : resFlake.nonFlakeDeps) { auto vNonFlake = state.allocAttr(v, nonFlake.alias); - state.mkAttrs(*vNonFlake, 4); + state.mkAttrs(*vNonFlake, 8); state.store->isValidPath(nonFlake.sourceInfo.storePath); mkString(*state.allocAttr(*vNonFlake, state.sOutPath), nonFlake.sourceInfo.storePath, {nonFlake.sourceInfo.storePath}); - // FIXME: add rev, shortRev, revCount, ... 
+ emitSourceInfoAttrs(state, nonFlake.sourceInfo, *vNonFlake); } mkString(*state.allocAttr(v, state.sDescription), resFlake.flake.description); - auto & path = resFlake.flake.sourceInfo.storePath; - state.store->isValidPath(path); - mkString(*state.allocAttr(v, state.sOutPath), path, {path}); - - if (resFlake.flake.sourceInfo.resolvedRef.rev) { - mkString(*state.allocAttr(v, state.symbols.create("rev")), - resFlake.flake.sourceInfo.resolvedRef.rev->gitRev()); - mkString(*state.allocAttr(v, state.symbols.create("shortRev")), - resFlake.flake.sourceInfo.resolvedRef.rev->gitShortRev()); - } - - if (resFlake.flake.sourceInfo.revCount) - mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *resFlake.flake.sourceInfo.revCount); + emitSourceInfoAttrs(state, resFlake.flake.sourceInfo, v); auto vProvides = state.allocAttr(v, state.symbols.create("provides")); mkApp(*vProvides, *resFlake.flake.vProvides, v); From 46294d60cd46564ea6b40cf9c32759021f9d1fc9 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 28 May 2019 14:01:57 +0200 Subject: [PATCH 161/634] printFlakeInfo: Separate JSON output --- src/nix/flake.cc | 59 ++++++++++++++++++++++++++---------------------- 1 file changed, 32 insertions(+), 27 deletions(-) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 810529613..0fea1993c 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -94,34 +94,36 @@ static void sourceInfoToJson(const SourceInfo & sourceInfo, nlohmann::json & j) j["path"] = sourceInfo.storePath; } -static void printFlakeInfo(const Flake & flake, bool json) +static void printFlakeInfo(const Flake & flake) { - if (json) { - nlohmann::json j; - j["id"] = flake.id; - j["description"] = flake.description; - j["epoch"] = flake.epoch; - sourceInfoToJson(flake.sourceInfo, j); - std::cout << j.dump(4) << std::endl; - } else { - std::cout << "ID: " << flake.id << "\n"; - std::cout << "Description: " << flake.description << "\n"; - std::cout << "Epoch: " << flake.epoch << "\n"; - printSourceInfo(flake.sourceInfo); - } + std::cout << "ID: " << flake.id << "\n"; + std::cout << "Description: " << flake.description << "\n"; + std::cout << "Epoch: " << flake.epoch << "\n"; + printSourceInfo(flake.sourceInfo); } -static void printNonFlakeInfo(const NonFlake & nonFlake, bool json) +static nlohmann::json flakeToJson(const Flake & flake) { - if (json) { - nlohmann::json j; - j["id"] = nonFlake.alias; - printSourceInfo(nonFlake.sourceInfo); - std::cout << j.dump(4) << std::endl; - } else { - std::cout << "ID: " << nonFlake.alias << "\n"; - printSourceInfo(nonFlake.sourceInfo); - } + nlohmann::json j; + j["id"] = flake.id; + j["description"] = flake.description; + j["epoch"] = flake.epoch; + sourceInfoToJson(flake.sourceInfo, j); + return j; +} + +static void printNonFlakeInfo(const NonFlake & nonFlake) +{ + std::cout << "ID: " << nonFlake.alias << "\n"; + printSourceInfo(nonFlake.sourceInfo); +} + +static nlohmann::json nonFlakeToJson(const NonFlake & nonFlake) +{ + nlohmann::json j; + j["id"] = nonFlake.alias; + sourceInfoToJson(nonFlake.sourceInfo, j); + return j; } // FIXME: merge info CmdFlakeInfo? 
@@ -152,10 +154,10 @@ struct CmdFlakeDeps : FlakeCommand todo.pop(); for (auto & nonFlake : resFlake.nonFlakeDeps) - printNonFlakeInfo(nonFlake, false); + printNonFlakeInfo(nonFlake); for (auto & info : resFlake.flakeDeps) { - printFlakeInfo(info.second.flake, false); + printFlakeInfo(info.second.flake); todo.push(info.second); } } @@ -205,7 +207,10 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON { auto flake = getFlake(); stopProgressBar(); - printFlakeInfo(flake, json); + if (json) + std::cout << flakeToJson(flake).dump() << std::endl; + else + printFlakeInfo(flake); } }; From 444786e6d39209d3f0c222af0fbc80846ac7337d Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 28 May 2019 14:11:19 +0200 Subject: [PATCH 162/634] nix flake info: Add missing newline --- src/nix/flake.cc | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 0fea1993c..d8c422d3d 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -72,14 +72,14 @@ struct CmdFlakeList : EvalCommand static void printSourceInfo(const SourceInfo & sourceInfo) { - std::cout << "URI: " << sourceInfo.resolvedRef.to_string() << "\n"; + std::cout << fmt("URI: %s\n", sourceInfo.resolvedRef.to_string()); if (sourceInfo.resolvedRef.ref) - std::cout << "Branch: " << *sourceInfo.resolvedRef.ref; + std::cout << fmt("Branch: %s\n",*sourceInfo.resolvedRef.ref); if (sourceInfo.resolvedRef.rev) - std::cout << "Revision: " << sourceInfo.resolvedRef.rev->to_string(Base16, false) << "\n"; + std::cout << fmt("Revision: %s\n", sourceInfo.resolvedRef.rev->to_string(Base16, false)); if (sourceInfo.revCount) - std::cout << "Revcount: " << *sourceInfo.revCount << "\n"; - std::cout << "Path: " << sourceInfo.storePath << "\n"; + std::cout << fmt("Revcount: %s\n", *sourceInfo.revCount); + std::cout << fmt("Path: %s\n", sourceInfo.storePath); } static void sourceInfoToJson(const SourceInfo & sourceInfo, nlohmann::json & j) @@ -96,9 +96,9 @@ static void sourceInfoToJson(const SourceInfo & sourceInfo, nlohmann::json & j) static void printFlakeInfo(const Flake & flake) { - std::cout << "ID: " << flake.id << "\n"; - std::cout << "Description: " << flake.description << "\n"; - std::cout << "Epoch: " << flake.epoch << "\n"; + std::cout << fmt("ID: %s\n", flake.id); + std::cout << fmt("Description: %s\n", flake.description); + std::cout << fmt("Epoch: %s\n", flake.epoch); printSourceInfo(flake.sourceInfo); } @@ -114,7 +114,7 @@ static nlohmann::json flakeToJson(const Flake & flake) static void printNonFlakeInfo(const NonFlake & nonFlake) { - std::cout << "ID: " << nonFlake.alias << "\n"; + std::cout << fmt("ID: %s\n", nonFlake.alias); printSourceInfo(nonFlake.sourceInfo); } From 0f840483c731f48983832f7f627909f8463f05f3 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 28 May 2019 20:34:02 +0200 Subject: [PATCH 163/634] Add date of last commit to SourceInfo This is primarily useful for version string generation, where we need a monotonically increasing number. The revcount is the preferred thing to use, but isn't available for GitHub flakes (since it requires fetching the entire history). The last commit timestamp OTOH can be extracted from GitHub tarballs. 
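A rough sketch of the intended use (this paraphrases the release.nix hunk below; 'nix' is the flake value produced by callFlake(), whose lastModified attribute is the "%Y%m%d%H%M%S" string added by this commit, and 'officialRelease' is the existing release.nix argument):

    versionSuffix =
      if officialRelease then ""
      else if nix ? lastModified
      # lastModified is "%Y%m%d%H%M%S", so its first 8 characters give a YYYYMMDD date
      then "pre${builtins.substring 0 8 nix.lastModified}_${nix.shortRev or "0000000"}"
      else "pre${toString (nix.revCount or 0)}_${nix.shortRev or "0000000"}";

The YYYYMMDD prefix normally doesn't decrease from one commit to the next, which is good enough for a version suffix when revCount is unavailable.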
--- release.nix | 3 ++- src/libexpr/primops/fetchGit.cc | 9 +++++++- src/libexpr/primops/fetchGit.hh | 1 + src/libexpr/primops/flake.cc | 39 ++++++++++++++++++--------------- src/libexpr/primops/flake.hh | 14 +++++++++++- src/nix/flake.cc | 24 ++++++++++++-------- tests/flakes.sh | 1 + 7 files changed, 61 insertions(+), 30 deletions(-) diff --git a/release.nix b/release.nix index f98e6d6ed..d28c44910 100644 --- a/release.nix +++ b/release.nix @@ -19,7 +19,8 @@ let releaseTools.sourceTarball { name = "nix-tarball"; version = builtins.readFile ./.version; - versionSuffix = if officialRelease then "" else "pre${toString nix.revCount or 0}_${nix.shortRev or "0000000"}"; + versionSuffix = if officialRelease then "" else + "pre${if nix ? lastModified then builtins.substring 0 8 nix.lastModified else toString nix.revCount or 0}_${nix.shortRev or "0000000"}"; src = nix; inherit officialRelease; diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index f6b096c4a..10f6b6f72 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -69,6 +69,9 @@ GitInfo exportGit(ref store, std::string uri, gitInfo.storePath = store->addToStore("source", uri, true, htSHA256, filter); gitInfo.revCount = std::stoull(runProgram("git", true, { "-C", uri, "rev-list", "--count", "HEAD" })); + // FIXME: maybe we should use the timestamp of the last + // modified dirty file? + gitInfo.lastModified = std::stoull(runProgram("git", true, { "-C", uri, "show", "-s", "--format=%ct", "HEAD" })); return gitInfo; } @@ -85,8 +88,9 @@ GitInfo exportGit(ref store, std::string uri, } deletePath(getCacheDir() + "/nix/git"); + deletePath(getCacheDir() + "/nix/gitv2"); - Path cacheDir = getCacheDir() + "/nix/gitv2/" + hashString(htSHA256, uri).to_string(Base32, false); + Path cacheDir = getCacheDir() + "/nix/gitv3/" + hashString(htSHA256, uri).to_string(Base32, false); Path repoDir; if (isLocal) { @@ -181,6 +185,7 @@ GitInfo exportGit(ref store, std::string uri, if (store->isValidPath(storePath)) { gitInfo.storePath = storePath; gitInfo.revCount = json["revCount"]; + gitInfo.lastModified = json["lastModified"]; return gitInfo; } @@ -200,6 +205,7 @@ GitInfo exportGit(ref store, std::string uri, gitInfo.storePath = store->addToStore(name, tmpDir); gitInfo.revCount = std::stoull(runProgram("git", true, { "-C", repoDir, "rev-list", "--count", gitInfo.rev.gitRev() })); + gitInfo.lastModified = std::stoull(runProgram("git", true, { "-C", repoDir, "show", "-s", "--format=%ct", gitInfo.rev.gitRev() })); nlohmann::json json; json["storePath"] = gitInfo.storePath; @@ -207,6 +213,7 @@ GitInfo exportGit(ref store, std::string uri, json["name"] = name; json["rev"] = gitInfo.rev.gitRev(); json["revCount"] = gitInfo.revCount; + json["lastModified"] = gitInfo.lastModified; writeFile(storeLink, json.dump()); diff --git a/src/libexpr/primops/fetchGit.hh b/src/libexpr/primops/fetchGit.hh index 2ad6a5e5c..006fa8b5f 100644 --- a/src/libexpr/primops/fetchGit.hh +++ b/src/libexpr/primops/fetchGit.hh @@ -12,6 +12,7 @@ struct GitInfo std::string ref; Hash rev{htSHA1}; uint64_t revCount; + time_t lastModified; }; GitInfo exportGit(ref store, std::string uri, diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 162e5c915..7cbbf9e99 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -8,6 +8,8 @@ #include #include #include +#include +#include #include namespace nix { @@ -232,6 +234,18 @@ static SourceInfo fetchFlake(EvalState & state, const FlakeRef & 
flakeRef, bool if (evalSettings.pureEval && !impureIsAllowed && !resolvedRef.isImmutable()) throw Error("requested to fetch mutable flake '%s' in pure mode", resolvedRef); + auto doGit = [&](const GitInfo & gitInfo) { + FlakeRef ref(resolvedRef.baseRef()); + ref.ref = gitInfo.ref; + ref.rev = gitInfo.rev; + SourceInfo info(ref); + info.storePath = gitInfo.storePath; + info.revCount = gitInfo.revCount; + info.narHash = state.store->queryPathInfo(info.storePath)->narHash; + info.lastModified = gitInfo.lastModified; + return info; + }; + // This only downloads only one revision of the repo, not the entire history. if (auto refData = std::get_if(&resolvedRef.data)) { @@ -270,29 +284,13 @@ static SourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, bool // This downloads the entire git history else if (auto refData = std::get_if(&resolvedRef.data)) { - auto gitInfo = exportGit(state.store, refData->uri, resolvedRef.ref, resolvedRef.rev, "source"); - FlakeRef ref(resolvedRef.baseRef()); - ref.ref = gitInfo.ref; - ref.rev = gitInfo.rev; - SourceInfo info(ref); - info.storePath = gitInfo.storePath; - info.revCount = gitInfo.revCount; - info.narHash = state.store->queryPathInfo(info.storePath)->narHash; - return info; + return doGit(exportGit(state.store, refData->uri, resolvedRef.ref, resolvedRef.rev, "source")); } else if (auto refData = std::get_if(&resolvedRef.data)) { if (!pathExists(refData->path + "/.git")) throw Error("flake '%s' does not reference a Git repository", refData->path); - auto gitInfo = exportGit(state.store, refData->path, {}, {}, "source"); - FlakeRef ref(resolvedRef.baseRef()); - ref.ref = gitInfo.ref; - ref.rev = gitInfo.rev; - SourceInfo info(ref); - info.storePath = gitInfo.storePath; - info.revCount = gitInfo.revCount; - info.narHash = state.store->queryPathInfo(info.storePath)->narHash; - return info; + return doGit(exportGit(state.store, refData->path, {}, {}, "source")); } else abort(); @@ -529,6 +527,11 @@ static void emitSourceInfoAttrs(EvalState & state, const SourceInfo & sourceInfo if (sourceInfo.revCount) mkInt(*state.allocAttr(vAttrs, state.symbols.create("revCount")), *sourceInfo.revCount); + + if (sourceInfo.lastModified) + mkString(*state.allocAttr(vAttrs, state.symbols.create("lastModified")), + fmt("%s", + std::put_time(std::gmtime(&*sourceInfo.lastModified), "%Y%m%d%H%M%S"))); } void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index a26103736..0e2706e32 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -81,10 +81,22 @@ void writeRegistry(const FlakeRegistry &, const Path &); struct SourceInfo { + // Immutable flakeref that this source tree was obtained from. FlakeRef resolvedRef; + Path storePath; + + // Number of ancestors of the most recent commit. std::optional revCount; - Hash narHash; // store path hash + + // NAR hash of the store path. + Hash narHash; + + // A stable timestamp of this source tree. For Git and GitHub + // flakes, the commit date (not author date!) of the most recent + // commit. 
+ std::optional lastModified; + SourceInfo(const FlakeRef & resolvRef) : resolvedRef(resolvRef) {}; }; diff --git a/src/nix/flake.cc b/src/nix/flake.cc index d8c422d3d..7836f0cfe 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -7,6 +7,7 @@ #include #include +#include using namespace nix; @@ -72,14 +73,17 @@ struct CmdFlakeList : EvalCommand static void printSourceInfo(const SourceInfo & sourceInfo) { - std::cout << fmt("URI: %s\n", sourceInfo.resolvedRef.to_string()); + std::cout << fmt("URI: %s\n", sourceInfo.resolvedRef.to_string()); if (sourceInfo.resolvedRef.ref) - std::cout << fmt("Branch: %s\n",*sourceInfo.resolvedRef.ref); + std::cout << fmt("Branch: %s\n",*sourceInfo.resolvedRef.ref); if (sourceInfo.resolvedRef.rev) - std::cout << fmt("Revision: %s\n", sourceInfo.resolvedRef.rev->to_string(Base16, false)); + std::cout << fmt("Revision: %s\n", sourceInfo.resolvedRef.rev->to_string(Base16, false)); if (sourceInfo.revCount) - std::cout << fmt("Revcount: %s\n", *sourceInfo.revCount); - std::cout << fmt("Path: %s\n", sourceInfo.storePath); + std::cout << fmt("Revisions: %s\n", *sourceInfo.revCount); + if (sourceInfo.lastModified) + std::cout << fmt("Last modified: %s\n", + std::put_time(std::localtime(&*sourceInfo.lastModified), "%F %T")); + std::cout << fmt("Path: %s\n", sourceInfo.storePath); } static void sourceInfoToJson(const SourceInfo & sourceInfo, nlohmann::json & j) @@ -91,14 +95,16 @@ static void sourceInfoToJson(const SourceInfo & sourceInfo, nlohmann::json & j) j["revision"] = sourceInfo.resolvedRef.rev->to_string(Base16, false); if (sourceInfo.revCount) j["revCount"] = *sourceInfo.revCount; + if (sourceInfo.lastModified) + j["lastModified"] = *sourceInfo.lastModified; j["path"] = sourceInfo.storePath; } static void printFlakeInfo(const Flake & flake) { - std::cout << fmt("ID: %s\n", flake.id); - std::cout << fmt("Description: %s\n", flake.description); - std::cout << fmt("Epoch: %s\n", flake.epoch); + std::cout << fmt("ID: %s\n", flake.id); + std::cout << fmt("Description: %s\n", flake.description); + std::cout << fmt("Epoch: %s\n", flake.epoch); printSourceInfo(flake.sourceInfo); } @@ -114,7 +120,7 @@ static nlohmann::json flakeToJson(const Flake & flake) static void printNonFlakeInfo(const NonFlake & nonFlake) { - std::cout << fmt("ID: %s\n", nonFlake.alias); + std::cout << fmt("ID: %s\n", nonFlake.alias); printSourceInfo(nonFlake.sourceInfo); } diff --git a/tests/flakes.sh b/tests/flakes.sh index 6081e8939..d95d34c76 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -124,6 +124,7 @@ nix flake info --flake-registry $registry $flake1Dir | grep -q 'ID: *flake1' json=$(nix flake info --flake-registry $registry flake1 --json | jq .) [[ $(echo "$json" | jq -r .description) = 'Bla bla' ]] [[ -d $(echo "$json" | jq -r .path) ]] +[[ $(echo "$json" | jq -r .lastModified) = $(git -C $flake1Dir log -n1 --format=%ct) ]] # Test 'nix build' on a flake. 
nix build -o $TEST_ROOT/result --flake-registry $registry flake1:foo From ae7b56cd9a5ed8810828736fbb930a7c14ea44ca Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 28 May 2019 22:35:41 +0200 Subject: [PATCH 164/634] Get last commit time of github flakes --- src/libexpr/primops/flake.cc | 2 ++ src/libstore/download.cc | 17 ++++++++++++++--- src/libstore/download.hh | 2 ++ src/libutil/util.cc | 18 +++++++++++++++--- src/libutil/util.hh | 6 ++++-- 5 files changed, 37 insertions(+), 8 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 7cbbf9e99..257b81887 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -265,6 +265,7 @@ static SourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, bool request.unpack = true; request.name = "source"; request.ttl = resolvedRef.rev ? 1000000000 : settings.tarballTtl; + request.getLastModified = true; auto result = getDownloader()->downloadCached(state.store, request); if (!result.etag) @@ -278,6 +279,7 @@ static SourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, bool SourceInfo info(ref); info.storePath = result.storePath; info.narHash = state.store->queryPathInfo(info.storePath)->narHash; + info.lastModified = result.lastModified; return info; } diff --git a/src/libstore/download.cc b/src/libstore/download.cc index 0d1974d3b..0338727c1 100644 --- a/src/libstore/download.cc +++ b/src/libstore/download.cc @@ -808,6 +808,7 @@ CachedDownloadResult Downloader::downloadCached( CachedDownloadResult result; result.storePath = expectedStorePath; result.path = store->toRealPath(expectedStorePath); + assert(!request.getLastModified); // FIXME return result; } } @@ -892,16 +893,26 @@ CachedDownloadResult Downloader::downloadCached( store->addTempRoot(unpackedStorePath); if (!store->isValidPath(unpackedStorePath)) unpackedStorePath = ""; + else + result.lastModified = lstat(unpackedLink).st_mtime; } if (unpackedStorePath.empty()) { printInfo(format("unpacking '%1%'...") % url); Path tmpDir = createTempDir(); AutoDelete autoDelete(tmpDir, true); // FIXME: this requires GNU tar for decompression. - runProgram("tar", true, {"xf", store->toRealPath(storePath), "-C", tmpDir, "--strip-components", "1"}); - unpackedStorePath = store->addToStore(name, tmpDir, true, htSHA256, defaultPathFilter, NoRepair); + runProgram("tar", true, {"xf", store->toRealPath(storePath), "-C", tmpDir}); + auto members = readDirectory(tmpDir); + if (members.size() != 1) + throw nix::Error("tarball '%s' contains an unexpected number of top-level files", url); + auto topDir = tmpDir + "/" + members.begin()->name; + result.lastModified = lstat(topDir).st_mtime; + unpackedStorePath = store->addToStore(name, topDir, true, htSHA256, defaultPathFilter, NoRepair); } - replaceSymlink(unpackedStorePath, unpackedLink); + // Store the last-modified date of the tarball in the symlink + // mtime. This saves us from having to store it somewhere + // else. 
+ replaceSymlink(unpackedStorePath, unpackedLink, result.lastModified); storePath = unpackedStorePath; } diff --git a/src/libstore/download.hh b/src/libstore/download.hh index 404e51195..43b1c5c09 100644 --- a/src/libstore/download.hh +++ b/src/libstore/download.hh @@ -49,6 +49,7 @@ struct CachedDownloadRequest Hash expectedHash; unsigned int ttl = settings.tarballTtl; bool gcRoot = false; + bool getLastModified = false; CachedDownloadRequest(const std::string & uri) : uri(uri) { } @@ -62,6 +63,7 @@ struct CachedDownloadResult Path path; std::optional etag; std::string effectiveUri; + std::optional lastModified; }; class Store; diff --git a/src/libutil/util.cc b/src/libutil/util.cc index f82f902fc..92c8957ff 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -22,6 +22,7 @@ #include #include #include +#include #include #ifdef __APPLE__ @@ -552,20 +553,31 @@ Paths createDirs(const Path & path) } -void createSymlink(const Path & target, const Path & link) +void createSymlink(const Path & target, const Path & link, + std::optional mtime) { if (symlink(target.c_str(), link.c_str())) throw SysError(format("creating symlink from '%1%' to '%2%'") % link % target); + if (mtime) { + struct timeval times[2]; + times[0].tv_sec = *mtime; + times[0].tv_usec = 0; + times[1].tv_sec = *mtime; + times[1].tv_usec = 0; + if (lutimes(link.c_str(), times)) + throw SysError("setting time of symlink '%s'", link); + } } -void replaceSymlink(const Path & target, const Path & link) +void replaceSymlink(const Path & target, const Path & link, + std::optional mtime) { for (unsigned int n = 0; true; n++) { Path tmp = canonPath(fmt("%s/.%d_%s", dirOf(link), n, baseNameOf(link))); try { - createSymlink(target, tmp); + createSymlink(target, tmp, mtime); } catch (SysError & e) { if (e.errNo == EEXIST) continue; throw; diff --git a/src/libutil/util.hh b/src/libutil/util.hh index 35f9169f6..e05ef1e7d 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -142,10 +142,12 @@ Path getDataDir(); Paths createDirs(const Path & path); /* Create a symlink. */ -void createSymlink(const Path & target, const Path & link); +void createSymlink(const Path & target, const Path & link, + std::optional mtime = {}); /* Atomically create or replace a symlink. 
*/ -void replaceSymlink(const Path & target, const Path & link); +void replaceSymlink(const Path & target, const Path & link, + std::optional mtime = {}); /* Wrappers arount read()/write() that read/write exactly the From c356d034f3ad3639b52792828ced2cd5e26426ab Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 29 May 2019 15:12:22 +0200 Subject: [PATCH 165/634] Make unsupported flake attributes a fatal error --- src/libexpr/primops/flake.cc | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 257b81887..0fb562895 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -327,7 +327,9 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe state.forceAttrs(vInfo); - if (auto epoch = vInfo.attrs->get(state.symbols.create("epoch"))) { + auto sEpoch = state.symbols.create("epoch"); + + if (auto epoch = vInfo.attrs->get(sEpoch)) { flake.epoch = state.forceInt(*(**epoch).value, *(**epoch).pos); if (flake.epoch > 2019) throw Error("flake '%s' requires unsupported epoch %d; please upgrade Nix", flakeRef, flake.epoch); @@ -342,14 +344,18 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe if (auto description = vInfo.attrs->get(state.sDescription)) flake.description = state.forceStringNoCtx(*(**description).value, *(**description).pos); - if (auto requires = vInfo.attrs->get(state.symbols.create("requires"))) { + auto sRequires = state.symbols.create("requires"); + + if (auto requires = vInfo.attrs->get(sRequires)) { state.forceList(*(**requires).value, *(**requires).pos); for (unsigned int n = 0; n < (**requires).value->listSize(); ++n) flake.requires.push_back(FlakeRef(state.forceStringNoCtx( *(**requires).value->listElems()[n], *(**requires).pos))); } - if (std::optional nonFlakeRequires = vInfo.attrs->get(state.symbols.create("nonFlakeRequires"))) { + auto sNonFlakeRequires = state.symbols.create("nonFlakeRequires"); + + if (std::optional nonFlakeRequires = vInfo.attrs->get(sNonFlakeRequires)) { state.forceAttrs(*(**nonFlakeRequires).value, *(**nonFlakeRequires).pos); for (Attr attr : *(*(**nonFlakeRequires).value).attrs) { std::string myNonFlakeUri = state.forceStringNoCtx(*attr.value, *attr.pos); @@ -358,12 +364,25 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe } } - if (auto provides = vInfo.attrs->get(state.symbols.create("provides"))) { + auto sProvides = state.symbols.create("provides"); + + if (auto provides = vInfo.attrs->get(sProvides)) { state.forceFunction(*(**provides).value, *(**provides).pos); flake.vProvides = (**provides).value; } else throw Error("flake '%s' lacks attribute 'provides'", flakeRef); + for (auto & attr : *vInfo.attrs) { + if (attr.name != sEpoch && + attr.name != state.sName && + attr.name != state.sDescription && + attr.name != sRequires && + attr.name != sNonFlakeRequires && + attr.name != sProvides) + throw Error("flake '%s' has an unsupported attribute '%s', at %s", + flakeRef, attr.name, *attr.pos); + } + return flake; } From 6e4a8c47f46041a94135c1e91ce4ab606d05f8ee Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 29 May 2019 15:31:07 +0200 Subject: [PATCH 166/634] Put flake-related stuff in its own namespace --- src/libexpr/eval.hh | 9 +++-- src/libexpr/primops/flake.cc | 64 ++++++++++++++++++++---------------- src/libexpr/primops/flake.hh | 10 ++++-- src/nix/command.hh | 5 ++- src/nix/flake.cc | 5 +-- 
src/nix/installables.cc | 11 ++++--- 6 files changed, 62 insertions(+), 42 deletions(-) diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index 1e45bc1a8..46c6ea271 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -17,7 +17,10 @@ namespace nix { class Store; class EvalState; enum RepairFlag : bool; + +namespace flake { struct FlakeRegistry; +} typedef void (* PrimOpFun) (EvalState & state, const Pos & pos, Value * * args, Value & v); @@ -323,12 +326,12 @@ private: public: - const std::vector> getFlakeRegistries(); + const std::vector> getFlakeRegistries(); - std::shared_ptr getGlobalFlakeRegistry(); + std::shared_ptr getGlobalFlakeRegistry(); private: - std::shared_ptr _globalFlakeRegistry; + std::shared_ptr _globalFlakeRegistry; std::once_flag _globalFlakeRegistryInit; }; diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 0fb562895..5223b3df4 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -14,6 +14,10 @@ namespace nix { +using namespace flake; + +namespace flake { + /* Read a registry. */ std::shared_ptr readRegistry(const Path & path) { @@ -133,24 +137,6 @@ void writeLockFile(const LockFile & lockFile, const Path & path) writeFile(path, json.dump(4) + "\n"); // '4' = indentation in json file } -std::shared_ptr EvalState::getGlobalFlakeRegistry() -{ - std::call_once(_globalFlakeRegistryInit, [&]() { - auto path = evalSettings.flakeRegistry; - - if (!hasPrefix(path, "/")) { - CachedDownloadRequest request(evalSettings.flakeRegistry); - request.name = "flake-registry.json"; - request.gcRoot = true; - path = getDownloader()->downloadCached(store, request).path; - } - - _globalFlakeRegistry = readRegistry(path); - }); - - return _globalFlakeRegistry; -} - Path getUserRegistryPath() { return getHome() + "/.config/nix/registry.json"; @@ -170,17 +156,6 @@ std::shared_ptr getFlagRegistry(RegistryOverrides registryOverrid return flagRegistry; } -// This always returns a vector with flakeReg, userReg, globalReg. -// If one of them doesn't exist, the registry is left empty but does exist. -const Registries EvalState::getFlakeRegistries() -{ - Registries registries; - registries.push_back(getFlagRegistry(registryOverrides)); - registries.push_back(getUserRegistry()); - registries.push_back(getGlobalFlakeRegistry()); - return registries; -} - static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const Registries & registries, std::vector pastSearches = {}); @@ -637,3 +612,34 @@ void gitCloneFlake(FlakeRef flakeRef, EvalState & state, Registries registries, } } + +std::shared_ptr EvalState::getGlobalFlakeRegistry() +{ + std::call_once(_globalFlakeRegistryInit, [&]() { + auto path = evalSettings.flakeRegistry; + + if (!hasPrefix(path, "/")) { + CachedDownloadRequest request(evalSettings.flakeRegistry); + request.name = "flake-registry.json"; + request.gcRoot = true; + path = getDownloader()->downloadCached(store, request).path; + } + + _globalFlakeRegistry = readRegistry(path); + }); + + return _globalFlakeRegistry; +} + +// This always returns a vector with flakeReg, userReg, globalReg. +// If one of them doesn't exist, the registry is left empty but does exist. 
+const Registries EvalState::getFlakeRegistries() +{ + Registries registries; + registries.push_back(getFlagRegistry(registryOverrides)); + registries.push_back(getUserRegistry()); + registries.push_back(getGlobalFlakeRegistry()); + return registries; +} + +} diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 0e2706e32..309bf6db5 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -5,13 +5,15 @@ namespace nix { +struct Value; +class EvalState; + +namespace flake { + static const size_t FLAG_REGISTRY = 0; static const size_t USER_REGISTRY = 1; static const size_t GLOBAL_REGISTRY = 2; -struct Value; -class EvalState; - struct FlakeRegistry { std::map entries; @@ -143,3 +145,5 @@ void updateLockFile(EvalState &, const FlakeRef & flakeRef, bool recreateLockFil void gitCloneFlake(FlakeRef flakeRef, EvalState &, Registries, const Path & destDir); } + +} diff --git a/src/nix/command.hh b/src/nix/command.hh index a841b879a..26c308331 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -12,7 +12,10 @@ struct Value; class Bindings; class EvalState; class Store; + +namespace flake { enum HandleLockFile : unsigned int; +} /* A command that require a Nix store. */ struct StoreCommand : virtual Command @@ -71,7 +74,7 @@ struct MixFlakeOptions : virtual Args MixFlakeOptions(); - HandleLockFile getLockFileMode(); + flake::HandleLockFile getLockFileMode(); }; struct SourceExprCommand : virtual Args, EvalCommand, MixFlakeOptions diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 7836f0cfe..c2781531d 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -10,6 +10,7 @@ #include using namespace nix; +using namespace nix::flake; class FlakeCommand : virtual Args, public EvalCommand, public MixFlakeOptions { @@ -33,12 +34,12 @@ public: Flake getFlake() { auto evalState = getEvalState(); - return nix::getFlake(*evalState, getFlakeRef(), useRegistries); + return flake::getFlake(*evalState, getFlakeRef(), useRegistries); } ResolvedFlake resolveFlake() { - return nix::resolveFlake(*getEvalState(), getFlakeRef(), getLockFileMode()); + return flake::resolveFlake(*getEvalState(), getFlakeRef(), getLockFileMode()); } }; diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 4f9161666..df5214f13 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -32,8 +32,9 @@ MixFlakeOptions::MixFlakeOptions() .set(&useRegistries, false); } -HandleLockFile MixFlakeOptions::getLockFileMode() +flake::HandleLockFile MixFlakeOptions::getLockFileMode() { + using namespace flake; return useRegistries ? recreateLockFile @@ -163,18 +164,20 @@ struct InstallableAttrPath : InstallableValue } }; -void makeFlakeClosureGCRoot(Store & store, const FlakeRef & origFlakeRef, const ResolvedFlake & resFlake) +void makeFlakeClosureGCRoot(Store & store, + const FlakeRef & origFlakeRef, + const flake::ResolvedFlake & resFlake) { if (std::get_if(&origFlakeRef.data)) return; /* Get the store paths of all non-local flakes. 
*/ PathSet closure; - std::queue> queue; + std::queue> queue; queue.push(resFlake); while (!queue.empty()) { - const ResolvedFlake & flake = queue.front(); + const flake::ResolvedFlake & flake = queue.front(); queue.pop(); if (!std::get_if(&flake.flake.sourceInfo.resolvedRef.data)) closure.insert(flake.flake.sourceInfo.storePath); From 6ae4437acbd5967cf4211ce8c8685a6f1273aaeb Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 29 May 2019 15:44:48 +0200 Subject: [PATCH 167/634] Remove makeFlakeValue() --- src/libexpr/primops/flake.cc | 11 ++--------- src/libexpr/primops/flake.hh | 2 -- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 5223b3df4..0d762f2a1 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -566,18 +566,11 @@ void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) v.attrs->sort(); } -// Return the `provides` of the top flake, while assigning to `v` the provides -// of the dependencies as well. -void makeFlakeValue(EvalState & state, const FlakeRef & flakeRef, HandleLockFile handle, Value & v) -{ - callFlake(state, resolveFlake(state, flakeRef, handle), v); -} - // This function is exposed to be used in nix files. static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) { - makeFlakeValue(state, state.forceStringNoCtx(*args[0], pos), - evalSettings.pureEval ? AllPure : UseUpdatedLockFile, v); + callFlake(state, resolveFlake(state, state.forceStringNoCtx(*args[0], pos), + evalSettings.pureEval ? AllPure : UseUpdatedLockFile), v); } static RegisterPrimOp r2("getFlake", 1, prim_getFlake); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 309bf6db5..340b97c65 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -75,8 +75,6 @@ enum HandleLockFile : unsigned int , UseNewLockFile // `RecreateLockFile` without writing to file }; -void makeFlakeValue(EvalState &, const FlakeRef &, HandleLockFile, Value &); - std::shared_ptr readRegistry(const Path &); void writeRegistry(const FlakeRegistry &, const Path &); From de00ed15d3fd70ffd897d660fc337029ab50a0bf Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 29 May 2019 16:30:00 +0200 Subject: [PATCH 168/634] Doh --- shell.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shell.nix b/shell.nix index 228a6685b..d7e63bad3 100644 --- a/shell.nix +++ b/shell.nix @@ -2,7 +2,7 @@ , nixpkgs ? builtins.fetchTarball https://github.com/NixOS/nixpkgs-channels/archive/nixos-19.03.tar.gz }: -with import (builtins.fetchTarball https://github.com/NixOS/nixpkgs-channels/archive/nixos-19.03.tar.gz) { system = builtins.currentSystem or "x86_64-linux"; }; +with import nixpkgs { system = builtins.currentSystem or "x86_64-linux"; }; with import ./release-common.nix { inherit pkgs; }; From e0aaf05f4fde3096a86c6481ada64aae53bfce93 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 29 May 2019 17:25:41 +0200 Subject: [PATCH 169/634] Add 'nix flake check' command This evaluates all the 'provides' of a flake and builds the 'check' attributes. 
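A brief usage sketch (behaviour as implemented by the hunks below; in this repository the checks are the ones added to flake.nix in this same commit):

    $ nix flake check              # force each 'provides' attribute, require every
                                   # attribute of 'checks' to be a derivation, and build them
    $ nix flake check --no-build   # evaluate only; skip building the check derivations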
--- flake.nix | 8 +++-- src/nix/flake.cc | 93 ++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 97 insertions(+), 4 deletions(-) diff --git a/flake.nix b/flake.nix index ab316c7c6..dd7d7992c 100644 --- a/flake.nix +++ b/flake.nix @@ -14,13 +14,17 @@ nixpkgs = deps.nixpkgs; }; - packages.nix = hydraJobs.build.x86_64-linux; + checks.binaryTarball = hydraJobs.binaryTarball.x86_64-linux; + + packages = { + nix = hydraJobs.build.x86_64-linux; + nix-perl-bindings = hydraJobs.perlBindings.x86_64-linux; + }; defaultPackage = packages.nix; devShell = import ./shell.nix { nixpkgs = deps.nixpkgs; }; - }; } diff --git a/src/nix/flake.cc b/src/nix/flake.cc index c2781531d..f0231e263 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -3,7 +3,10 @@ #include "shared.hh" #include "progress-bar.hh" #include "eval.hh" +#include "eval-inline.hh" #include "primops/flake.hh" +#include "get-drvs.hh" +#include "store-api.hh" #include #include @@ -208,8 +211,6 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON return "list info about a given flake"; } - CmdFlakeInfo () { } - void run(nix::ref store) override { auto flake = getFlake(); @@ -221,6 +222,93 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON } }; +static void enumerateProvides(EvalState & state, Value & vFlake, + std::function callback) +{ + state.forceAttrs(vFlake); + + auto vProvides = (*vFlake.attrs->get(state.symbols.create("provides")))->value; + + state.forceAttrs(*vProvides); + + for (auto & attr : *vProvides->attrs) + callback(attr.name, *attr.value); +} + +struct CmdFlakeCheck : FlakeCommand, MixJSON +{ + bool build = true; + + CmdFlakeCheck() + { + mkFlag() + .longName("no-build") + .description("do not build checks") + .set(&build, false); + } + + std::string name() override + { + return "check"; + } + + std::string description() override + { + return "check whether the flake evaluates and run its tests"; + } + + void run(nix::ref store) override + { + auto state = getEvalState(); + auto flake = resolveFlake(); + + PathSet drvPaths; + + { + Activity act(*logger, lvlInfo, actUnknown, "evaluating flake"); + + auto vFlake = state->allocValue(); + flake::callFlake(*state, flake, *vFlake); + + enumerateProvides(*state, + *vFlake, + [&](const std::string & name, Value & vProvide) { + Activity act(*logger, lvlChatty, actUnknown, + fmt("checking flake output '%s'", name)); + + try { + state->forceValue(vProvide); + + if (name == "checks") { + state->forceAttrs(vProvide); + for (auto & aCheck : *vProvide.attrs) { + try { + auto drvInfo = getDerivation(*state, *aCheck.value, false); + if (!drvInfo) + throw Error("flake output 'check.%s' is not a derivation", aCheck.name); + drvPaths.insert(drvInfo->queryDrvPath()); + // FIXME: check meta attributes? 
+ } catch (Error & e) { + e.addPrefix(fmt("while checking flake check '" ANSI_BOLD "%s" ANSI_NORMAL "':\n", aCheck.name)); + throw; + } + } + } + + } catch (Error & e) { + e.addPrefix(fmt("while checking flake output '" ANSI_BOLD "%s" ANSI_NORMAL "':\n", name)); + throw; + } + }); + } + + if (build) { + Activity act(*logger, lvlInfo, actUnknown, "running flake checks"); + store->buildPaths(drvPaths); + } + } +}; + struct CmdFlakeAdd : MixEvalArgs, Command { FlakeUri alias; @@ -387,6 +475,7 @@ struct CmdFlake : virtual MultiCommand, virtual Command : MultiCommand({make_ref() , make_ref() , make_ref() + , make_ref() , make_ref() , make_ref() , make_ref() From 0e32b32fa3655e8a3e028ebb5ac838164a606132 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 29 May 2019 20:57:08 +0200 Subject: [PATCH 170/634] nix flake check: Check defaultPackage, devShell and packages --- src/nix/flake.cc | 38 ++++++++++++++++++++++++++------------ 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index f0231e263..19e97aed9 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -262,6 +262,19 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON auto state = getEvalState(); auto flake = resolveFlake(); + auto checkDerivation = [&](const std::string & attrPath, Value & v) { + try { + auto drvInfo = getDerivation(*state, v, false); + if (!drvInfo) + throw Error("flake attribute '%s' is not a derivation", attrPath); + // FIXME: check meta attributes + return drvInfo->queryDrvPath(); + } catch (Error & e) { + e.addPrefix(fmt("while checking flake attribute '" ANSI_BOLD "%s" ANSI_NORMAL "':\n", attrPath)); + throw; + } + }; + PathSet drvPaths; { @@ -281,20 +294,21 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON if (name == "checks") { state->forceAttrs(vProvide); - for (auto & aCheck : *vProvide.attrs) { - try { - auto drvInfo = getDerivation(*state, *aCheck.value, false); - if (!drvInfo) - throw Error("flake output 'check.%s' is not a derivation", aCheck.name); - drvPaths.insert(drvInfo->queryDrvPath()); - // FIXME: check meta attributes? - } catch (Error & e) { - e.addPrefix(fmt("while checking flake check '" ANSI_BOLD "%s" ANSI_NORMAL "':\n", aCheck.name)); - throw; - } - } + for (auto & aCheck : *vProvide.attrs) + drvPaths.insert(checkDerivation( + name + "." + (std::string) aCheck.name, *aCheck.value)); } + else if (name == "packages") { + state->forceAttrs(vProvide); + for (auto & aCheck : *vProvide.attrs) + checkDerivation( + name + "." 
+ (std::string) aCheck.name, *aCheck.value); + } + + else if (name == "defaultPackage" || name == "devShell") + checkDerivation(name, vProvide); + } catch (Error & e) { e.addPrefix(fmt("while checking flake output '" ANSI_BOLD "%s" ANSI_NORMAL "':\n", name)); throw; From a9d3524e1f4f0212184d611b3ff3b520619dff8e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 29 May 2019 21:00:44 +0200 Subject: [PATCH 171/634] nix flake check: Use read-only mode if we're not building --- src/nix/flake.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 19e97aed9..0be003da2 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -259,6 +259,8 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON void run(nix::ref store) override { + settings.readOnlyMode = !build; + auto state = getEvalState(); auto flake = resolveFlake(); From b70fc8f30c66df9c5ba4be06e4a808df1bba7d3a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 29 May 2019 21:00:58 +0200 Subject: [PATCH 172/634] flake.nix: Add more checks --- flake.nix | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index dd7d7992c..20ad2bc7e 100644 --- a/flake.nix +++ b/flake.nix @@ -14,7 +14,12 @@ nixpkgs = deps.nixpkgs; }; - checks.binaryTarball = hydraJobs.binaryTarball.x86_64-linux; + checks = { + binaryTarball = hydraJobs.binaryTarball.x86_64-linux; + perlBindings = hydraJobs.perlBindings.x86_64-linux; + inherit (hydraJobs.tests) remoteBuilds nix-copy-closure; + setuid = hydraJobs.tests.setuid.x86_64-linux; + }; packages = { nix = hydraJobs.build.x86_64-linux; From 3488fa7c6cef487d3f9501e89894f9e632e678db Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 29 May 2019 21:30:22 +0200 Subject: [PATCH 173/634] Hack: Use legacyPackages from Nixpkgs Nixpkgs doesn't provide a clean "packages" set yet, so until that's the case, look for packages in "legacyPackages" as well. --- src/nix/installables.cc | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/nix/installables.cc b/src/nix/installables.cc index df5214f13..ea12cd79c 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -236,9 +236,9 @@ struct InstallableFlake : InstallableValue auto emptyArgs = state.allocBindings(0); - // As a convenience, look for the attribute in - // 'provides.packages'. if (searchPackages) { + // As a convenience, look for the attribute in + // 'provides.packages'. if (auto aPackages = *vProvides->attrs->get(state.symbols.create("packages"))) { try { auto * v = findAlongAttrPath(state, *attrPaths.begin(), *emptyArgs, *aPackages->value); @@ -247,6 +247,17 @@ struct InstallableFlake : InstallableValue } catch (AttrPathNotFound & e) { } } + + // As a temporary hack until Nixpkgs is properly converted + // to provide a clean 'packages' set, look in 'legacyPackages'. + if (auto aPackages = *vProvides->attrs->get(state.symbols.create("legacyPackages"))) { + try { + auto * v = findAlongAttrPath(state, *attrPaths.begin(), *emptyArgs, *aPackages->value); + state.forceValue(*v); + return v; + } catch (AttrPathNotFound & e) { + } + } } // Otherwise, look for it in 'provides'. From 49436bdbb77f32ffec2035e836add04f98be49e3 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 29 May 2019 22:17:08 +0200 Subject: [PATCH 174/634] nix flake info --json: List the "provides" It also lists the contents of "checks" and "packages". 
For example: $ nix flake info --json | jq { "branch": "HEAD", "description": "The purely functional package manager", "epoch": 2019, "id": "nix", "lastModified": 1559161142, "path": "/nix/store/2w2qla8735dbxah8gai8r1nsbf5x4f5d-source", "provides": { "checks": { "binaryTarball": {}, "nix-copy-closure": {}, "perlBindings": {}, "remoteBuilds": {}, "setuid": {} }, "defaultPackage": {}, "devShell": {}, "hydraJobs": {}, "packages": { "nix": {}, "nix-perl-bindings": {} } }, "revCount": 6955, "revision": "8cb24e04e8b6cc60e2504733afe78e0eadafcd98", "uri": "/home/eelco/Dev/nix" } Fixes #2820. --- src/nix/flake.cc | 58 +++++++++++++++++++++++++++++++++++------------- 1 file changed, 42 insertions(+), 16 deletions(-) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 0be003da2..872ec2849 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -199,6 +199,19 @@ struct CmdFlakeUpdate : FlakeCommand } }; +static void enumerateProvides(EvalState & state, Value & vFlake, + std::function callback) +{ + state.forceAttrs(vFlake); + + auto vProvides = (*vFlake.attrs->get(state.symbols.create("provides")))->value; + + state.forceAttrs(*vProvides); + + for (auto & attr : *vProvides->attrs) + callback(attr.name, *attr.value); +} + struct CmdFlakeInfo : FlakeCommand, MixJSON { std::string name() override @@ -215,26 +228,39 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON { auto flake = getFlake(); stopProgressBar(); - if (json) - std::cout << flakeToJson(flake).dump() << std::endl; - else + + if (json) { + auto json = flakeToJson(flake); + + auto state = getEvalState(); + + auto vFlake = state->allocValue(); + flake::callFlake(*state, flake, *vFlake); + + auto provides = nlohmann::json::object(); + + enumerateProvides(*state, + *vFlake, + [&](const std::string & name, Value & vProvide) { + auto provide = nlohmann::json::object(); + + if (name == "checks" || name == "packages") { + state->forceAttrs(vProvide); + for (auto & aCheck : *vProvide.attrs) + provide[aCheck.name] = nlohmann::json::object(); + } + + provides[name] = provide; + }); + + json["provides"] = std::move(provides); + + std::cout << json.dump() << std::endl; + } else printFlakeInfo(flake); } }; -static void enumerateProvides(EvalState & state, Value & vFlake, - std::function callback) -{ - state.forceAttrs(vFlake); - - auto vProvides = (*vFlake.attrs->get(state.symbols.create("provides")))->value; - - state.forceAttrs(*vProvides); - - for (auto & attr : *vProvides->attrs) - callback(attr.name, *attr.value); -} - struct CmdFlakeCheck : FlakeCommand, MixJSON { bool build = true; From 094539ef4a637bde795bf67ddcc8f1f7443499f9 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 29 May 2019 23:09:23 +0200 Subject: [PATCH 175/634] Rename requires -> inputs, provides -> outputs Issue #2828. --- doc/flakes/design.md | 37 +++++++-------- flake.lock | 8 ++-- flake.nix | 10 ++-- src/libexpr/primops/flake.cc | 90 ++++++++++++++++++------------------ src/libexpr/primops/flake.hh | 6 +-- src/nix/flake.cc | 20 ++++---- src/nix/installables.cc | 14 +++--- tests/flakes.sh | 38 +++++++-------- 8 files changed, 112 insertions(+), 111 deletions(-) diff --git a/doc/flakes/design.md b/doc/flakes/design.md index ebc229b09..4f6524f83 100644 --- a/doc/flakes/design.md +++ b/doc/flakes/design.md @@ -103,12 +103,12 @@ module. # A list of flake references denoting the flakes that this flake # depends on. Nix will resolve and fetch these flakes and pass them - # as a function argument to `provides` below. + # as a function argument to `outputs` below. 
# # `flake:nixpkgs` denotes a flake named `nixpkgs` which is looked up # in the flake registry, or in `flake.lock` inside this flake, if it # exists. - requires = [ flake:nixpkgs ]; + inputs = [ flake:nixpkgs ]; # The stuff provided by this flake. Flakes can provide whatever they # want (convention over configuration), but some attributes have @@ -117,9 +117,9 @@ module. # `nixosModules` is used by NixOS to automatically pull in the # modules provided by a flake. # - # `provides` takes a single argument named `deps` that contains + # `outputs` takes a single argument named `deps` that contains # the resolved set of flakes. (See below.) - provides = deps: { + outputs = deps: { # This is searched by `nix`, so something like `nix install # dwarffs.dwarffs` resolves to this `packages.dwarffs`. @@ -168,7 +168,7 @@ Similarly, a minimal `flake.nix` for Nixpkgs: description = "A collection of packages for the Nix package manager"; - provides = deps: + outputs = deps: let pkgs = import ./. {}; in { lib = import ./lib; @@ -310,9 +310,9 @@ Example: ``` -## `provides` +## `outputs` -The flake attribute `provides` is a function that takes an argument +The flake attribute `outputs` is a function that takes an argument named `deps` and returns a (mostly) arbitrary attrset of values. Some of the standard result attributes: @@ -329,13 +329,13 @@ of the standard result attributes: we need to avoid a situation where `nixos-rebuild` needs to fetch its own `nixpkgs` just to do `evalModules`.) -* `shell`: A specification of a development environment in some TBD +* `devShell`: A specification of a development environment in some TBD format. The function argument `flakes` is an attrset that contains an -attribute for each dependency specified in `requires`. (Should it +attribute for each dependency specified in `inputs`. (Should it contain transitive dependencies? Probably not.) Each attribute is an -attrset containing the `provides` of the dependency, in addition to +attrset containing the `outputs` of the dependency, in addition to the following attributes: * `path`: The path to the flake's source code. Useful when you want to @@ -366,13 +366,13 @@ It may be useful to pull in repositories that are not flakes (i.e. don't contain a `flake.nix`). This could be done in two ways: * Allow flakes not to have a `flake.nix` file, in which case it's a - flake with no requires and no provides. The downside of this + flake with no inputs and no outputs. The downside of this approach is that we can't detect accidental use of a non-flake repository. (Also, we need to conjure up an identifier somehow.) * Add a flake attribute to specifiy non-flake dependencies, e.g. - > nonFlakeRequires.foobar = github:foo/bar; + > nonFlakeInputs.foobar = github:foo/bar; ## Flake registry @@ -454,7 +454,7 @@ The default installation source in `nix` is the `packages` from all flakes in the registry, that is: ``` builtins.mapAttrs (flakeName: flakeInfo: - (getFlake flakeInfo.uri).${flakeName}.provides.packages or {}) + (getFlake flakeInfo.uri).${flakeName}.outputs.packages or {}) builtins.flakeRegistry ``` (where `builtins.flakeRegistry` is the global registry with user @@ -476,10 +476,11 @@ in the registry named `hello`. Maybe the command -> nix shell +> nix dev-shell -should do something like use `provides.shell` to initialize the shell, -but probably we should ditch `nix shell` / `nix-shell` for direnv. 
+should do something like use `outputs.devShell` to initialize the +shell, but probably we should ditch `nix shell` / `nix-shell` for +direnv. ## Pure evaluation and caching @@ -535,7 +536,7 @@ repositories. ```nix { - provides = flakes: { + outputs = flakes: { nixosSystems.default = flakes.nixpkgs.lib.evalModules { modules = @@ -549,7 +550,7 @@ repositories. }; }; - requires = + inputs = [ "nixpkgs/nixos-18.09" "dwarffs" "hydra" diff --git a/flake.lock b/flake.lock index 965d038ce..b85571b61 100644 --- a/flake.lock +++ b/flake.lock @@ -1,10 +1,10 @@ { - "nonFlakeRequires": {}, - "requires": { + "inputs": { "nixpkgs": { - "contentHash": "sha256-vy2UmXQM66aS/Kn2tCtjt9RwxfBvV+nQVb5tJQFwi8E=", - "uri": "github:edolstra/nixpkgs/a4d896e89932e873c4117908d558db6210fa3b56" + "narHash": "sha256-rMiWaLXkhizEEMEeMDutUl0Y/c+VEjfjvMkvBwvuQJU=", + "uri": "github:edolstra/nixpkgs/eeeffd24cd7e407cfaa99e98cfbb8f93bf4cc033" } }, + "nonFlakeInputs": {}, "version": 1 } diff --git a/flake.nix b/flake.nix index 20ad2bc7e..ab96d5c90 100644 --- a/flake.nix +++ b/flake.nix @@ -5,13 +5,13 @@ epoch = 2019; - requires = [ "nixpkgs" ]; + inputs = [ "nixpkgs" ]; - provides = deps: rec { + outputs = inputs: rec { hydraJobs = import ./release.nix { - nix = deps.self; - nixpkgs = deps.nixpkgs; + nix = inputs.self; + nixpkgs = inputs.nixpkgs; }; checks = { @@ -29,7 +29,7 @@ defaultPackage = packages.nix; devShell = import ./shell.nix { - nixpkgs = deps.nixpkgs; + nixpkgs = inputs.nixpkgs; }; }; } diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 0d762f2a1..fdbdc83bc 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -56,21 +56,21 @@ LockFile::FlakeEntry readFlakeEntry(nlohmann::json json) if (!flakeRef.isImmutable()) throw Error("cannot use mutable flake '%s' in pure mode", flakeRef); - LockFile::FlakeEntry entry(flakeRef, Hash((std::string) json["contentHash"])); + LockFile::FlakeEntry entry(flakeRef, Hash((std::string) json["narHash"])); - auto nonFlakeRequires = json["nonFlakeRequires"]; + auto nonFlakeInputs = json["nonFlakeInputs"]; - for (auto i = nonFlakeRequires.begin(); i != nonFlakeRequires.end(); ++i) { + for (auto i = nonFlakeInputs.begin(); i != nonFlakeInputs.end(); ++i) { FlakeRef flakeRef(i->value("uri", "")); if (!flakeRef.isImmutable()) throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef); - LockFile::NonFlakeEntry nonEntry(flakeRef, Hash(i->value("contentHash", ""))); + LockFile::NonFlakeEntry nonEntry(flakeRef, Hash(i->value("narHash", ""))); entry.nonFlakeEntries.insert_or_assign(i.key(), nonEntry); } - auto requires = json["requires"]; + auto inputs = json["inputs"]; - for (auto i = requires.begin(); i != requires.end(); ++i) + for (auto i = inputs.begin(); i != inputs.end(); ++i) entry.flakeEntries.insert_or_assign(i.key(), readFlakeEntry(*i)); return entry; @@ -89,19 +89,19 @@ LockFile readLockFile(const Path & path) if (version != 1) throw Error("lock file '%s' has unsupported version %d", path, version); - auto nonFlakeRequires = json["nonFlakeRequires"]; + auto nonFlakeInputs = json["nonFlakeInputs"]; - for (auto i = nonFlakeRequires.begin(); i != nonFlakeRequires.end(); ++i) { + for (auto i = nonFlakeInputs.begin(); i != nonFlakeInputs.end(); ++i) { FlakeRef flakeRef(i->value("uri", "")); - LockFile::NonFlakeEntry nonEntry(flakeRef, Hash(i->value("contentHash", ""))); + LockFile::NonFlakeEntry nonEntry(flakeRef, Hash(i->value("narHash", ""))); if (!flakeRef.isImmutable()) throw Error("found mutable FlakeRef 
'%s' in lockfile at path %s", flakeRef, path); lockFile.nonFlakeEntries.insert_or_assign(i.key(), nonEntry); } - auto requires = json["requires"]; + auto inputs = json["inputs"]; - for (auto i = requires.begin(); i != requires.end(); ++i) + for (auto i = inputs.begin(); i != inputs.end(); ++i) lockFile.flakeEntries.insert_or_assign(i.key(), readFlakeEntry(*i)); return lockFile; @@ -111,13 +111,13 @@ nlohmann::json flakeEntryToJson(const LockFile::FlakeEntry & entry) { nlohmann::json json; json["uri"] = entry.ref.to_string(); - json["contentHash"] = entry.narHash.to_string(SRI); + json["narHash"] = entry.narHash.to_string(SRI); for (auto & x : entry.nonFlakeEntries) { - json["nonFlakeRequires"][x.first]["uri"] = x.second.ref.to_string(); - json["nonFlakeRequires"][x.first]["contentHash"] = x.second.narHash.to_string(SRI); + json["nonFlakeInputs"][x.first]["uri"] = x.second.ref.to_string(); + json["nonFlakeInputs"][x.first]["narHash"] = x.second.narHash.to_string(SRI); } for (auto & x : entry.flakeEntries) - json["requires"][x.first.to_string()] = flakeEntryToJson(x.second); + json["inputs"][x.first.to_string()] = flakeEntryToJson(x.second); return json; } @@ -125,14 +125,14 @@ void writeLockFile(const LockFile & lockFile, const Path & path) { nlohmann::json json; json["version"] = 1; - json["nonFlakeRequires"] = nlohmann::json::object(); + json["nonFlakeInputs"] = nlohmann::json::object(); for (auto & x : lockFile.nonFlakeEntries) { - json["nonFlakeRequires"][x.first]["uri"] = x.second.ref.to_string(); - json["nonFlakeRequires"][x.first]["contentHash"] = x.second.narHash.to_string(SRI); + json["nonFlakeInputs"][x.first]["uri"] = x.second.ref.to_string(); + json["nonFlakeInputs"][x.first]["narHash"] = x.second.narHash.to_string(SRI); } - json["requires"] = nlohmann::json::object(); + json["inputs"] = nlohmann::json::object(); for (auto & x : lockFile.flakeEntries) - json["requires"][x.first.to_string()] = flakeEntryToJson(x.second); + json["inputs"][x.first.to_string()] = flakeEntryToJson(x.second); createDirs(dirOf(path)); writeFile(path, json.dump(4) + "\n"); // '4' = indentation in json file } @@ -319,41 +319,41 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe if (auto description = vInfo.attrs->get(state.sDescription)) flake.description = state.forceStringNoCtx(*(**description).value, *(**description).pos); - auto sRequires = state.symbols.create("requires"); + auto sInputs = state.symbols.create("inputs"); - if (auto requires = vInfo.attrs->get(sRequires)) { - state.forceList(*(**requires).value, *(**requires).pos); - for (unsigned int n = 0; n < (**requires).value->listSize(); ++n) - flake.requires.push_back(FlakeRef(state.forceStringNoCtx( - *(**requires).value->listElems()[n], *(**requires).pos))); + if (auto inputs = vInfo.attrs->get(sInputs)) { + state.forceList(*(**inputs).value, *(**inputs).pos); + for (unsigned int n = 0; n < (**inputs).value->listSize(); ++n) + flake.inputs.push_back(FlakeRef(state.forceStringNoCtx( + *(**inputs).value->listElems()[n], *(**inputs).pos))); } - auto sNonFlakeRequires = state.symbols.create("nonFlakeRequires"); + auto sNonFlakeInputs = state.symbols.create("nonFlakeInputs"); - if (std::optional nonFlakeRequires = vInfo.attrs->get(sNonFlakeRequires)) { - state.forceAttrs(*(**nonFlakeRequires).value, *(**nonFlakeRequires).pos); - for (Attr attr : *(*(**nonFlakeRequires).value).attrs) { + if (std::optional nonFlakeInputs = vInfo.attrs->get(sNonFlakeInputs)) { + state.forceAttrs(*(**nonFlakeInputs).value, 
*(**nonFlakeInputs).pos); + for (Attr attr : *(*(**nonFlakeInputs).value).attrs) { std::string myNonFlakeUri = state.forceStringNoCtx(*attr.value, *attr.pos); FlakeRef nonFlakeRef = FlakeRef(myNonFlakeUri); - flake.nonFlakeRequires.insert_or_assign(attr.name, nonFlakeRef); + flake.nonFlakeInputs.insert_or_assign(attr.name, nonFlakeRef); } } - auto sProvides = state.symbols.create("provides"); + auto sOutputs = state.symbols.create("outputs"); - if (auto provides = vInfo.attrs->get(sProvides)) { - state.forceFunction(*(**provides).value, *(**provides).pos); - flake.vProvides = (**provides).value; + if (auto outputs = vInfo.attrs->get(sOutputs)) { + state.forceFunction(*(**outputs).value, *(**outputs).pos); + flake.vOutputs = (**outputs).value; } else - throw Error("flake '%s' lacks attribute 'provides'", flakeRef); + throw Error("flake '%s' lacks attribute 'outputs'", flakeRef); for (auto & attr : *vInfo.attrs) { if (attr.name != sEpoch && attr.name != state.sName && attr.name != state.sDescription && - attr.name != sRequires && - attr.name != sNonFlakeRequires && - attr.name != sProvides) + attr.name != sInputs && + attr.name != sNonFlakeInputs && + attr.name != sOutputs) throw Error("flake '%s' has an unsupported attribute '%s', at %s", flakeRef, attr.name, *attr.pos); } @@ -436,7 +436,7 @@ ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flake ResolvedFlake deps(flake); - for (auto & nonFlakeInfo : flake.nonFlakeRequires) { + for (auto & nonFlakeInfo : flake.nonFlakeInputs) { FlakeRef ref = nonFlakeInfo.second; auto i = lockFile.nonFlakeEntries.find(nonFlakeInfo.first); if (i != lockFile.nonFlakeEntries.end()) { @@ -451,7 +451,7 @@ ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flake } } - for (auto newFlakeRef : flake.requires) { + for (auto newFlakeRef : flake.inputs) { auto i = lockFile.flakeEntries.find(newFlakeRef); if (i != lockFile.flakeEntries.end()) { // Propagate lockFile downwards if possible ResolvedFlake newResFlake = resolveFlakeFromLockFile(state, i->second.ref, handleLockFile, entryToLockFile(i->second)); @@ -532,8 +532,8 @@ static void emitSourceInfoAttrs(EvalState & state, const SourceInfo & sourceInfo void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) { - // Construct the resulting attrset '{description, provides, - // ...}'. This attrset is passed lazily as an argument to 'provides'. + // Construct the resulting attrset '{description, outputs, + // ...}'. This attrset is passed lazily as an argument to 'outputs'. 
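    // (Illustration, assuming a flake.nix of the usual shape: 'outputs' is
    // the function declared there as
    //   outputs = inputs: { packages = ...; defaultPackage = ...; };
    // and the attrset built below, containing the inputs, the sourceInfo
    // attributes, 'description', 'outputs' and 'self', is the value that
    // function is applied to.)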
state.mkAttrs(v, resFlake.flakeDeps.size() + resFlake.nonFlakeDeps.size() + 8); @@ -558,8 +558,8 @@ void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) emitSourceInfoAttrs(state, resFlake.flake.sourceInfo, v); - auto vProvides = state.allocAttr(v, state.symbols.create("provides")); - mkApp(*vProvides, *resFlake.flake.vProvides, v); + auto vOutputs = state.allocAttr(v, state.symbols.create("outputs")); + mkApp(*vOutputs, *resFlake.flake.vOutputs, v); v.attrs->push_back(Attr(state.symbols.create("self"), &v)); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 340b97c65..82b0973f6 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -106,9 +106,9 @@ struct Flake FlakeRef originalRef; std::string description; SourceInfo sourceInfo; - std::vector requires; - std::map nonFlakeRequires; - Value * vProvides; // FIXME: gc + std::vector inputs; + std::map nonFlakeInputs; + Value * vOutputs; // FIXME: gc unsigned int epoch; Flake(const FlakeRef & origRef, const SourceInfo & sourceInfo) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 872ec2849..8d6716391 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -199,16 +199,16 @@ struct CmdFlakeUpdate : FlakeCommand } }; -static void enumerateProvides(EvalState & state, Value & vFlake, +static void enumerateOutputs(EvalState & state, Value & vFlake, std::function callback) { state.forceAttrs(vFlake); - auto vProvides = (*vFlake.attrs->get(state.symbols.create("provides")))->value; + auto vOutputs = (*vFlake.attrs->get(state.symbols.create("outputs")))->value; - state.forceAttrs(*vProvides); + state.forceAttrs(*vOutputs); - for (auto & attr : *vProvides->attrs) + for (auto & attr : *vOutputs->attrs) callback(attr.name, *attr.value); } @@ -237,9 +237,9 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON auto vFlake = state->allocValue(); flake::callFlake(*state, flake, *vFlake); - auto provides = nlohmann::json::object(); + auto outputs = nlohmann::json::object(); - enumerateProvides(*state, + enumerateOutputs(*state, *vFlake, [&](const std::string & name, Value & vProvide) { auto provide = nlohmann::json::object(); @@ -250,10 +250,10 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON provide[aCheck.name] = nlohmann::json::object(); } - provides[name] = provide; + outputs[name] = provide; }); - json["provides"] = std::move(provides); + json["outputs"] = std::move(outputs); std::cout << json.dump() << std::endl; } else @@ -298,7 +298,7 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON // FIXME: check meta attributes return drvInfo->queryDrvPath(); } catch (Error & e) { - e.addPrefix(fmt("while checking flake attribute '" ANSI_BOLD "%s" ANSI_NORMAL "':\n", attrPath)); + e.addPrefix(fmt("while checking flake output attribute '" ANSI_BOLD "%s" ANSI_NORMAL "':\n", attrPath)); throw; } }; @@ -311,7 +311,7 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON auto vFlake = state->allocValue(); flake::callFlake(*state, flake, *vFlake); - enumerateProvides(*state, + enumerateOutputs(*state, *vFlake, [&](const std::string & name, Value & vProvide) { Activity act(*logger, lvlChatty, actUnknown, diff --git a/src/nix/installables.cc b/src/nix/installables.cc index ea12cd79c..fe89a6bb4 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -230,16 +230,16 @@ struct InstallableFlake : InstallableValue makeFlakeClosureGCRoot(*state.store, flakeRef, resFlake); - auto vProvides = (*vFlake->attrs->get(state.symbols.create("provides")))->value; + auto vOutputs = 
(*vFlake->attrs->get(state.symbols.create("outputs")))->value; - state.forceValue(*vProvides); + state.forceValue(*vOutputs); auto emptyArgs = state.allocBindings(0); if (searchPackages) { // As a convenience, look for the attribute in - // 'provides.packages'. - if (auto aPackages = *vProvides->attrs->get(state.symbols.create("packages"))) { + // 'outputs.packages'. + if (auto aPackages = *vOutputs->attrs->get(state.symbols.create("packages"))) { try { auto * v = findAlongAttrPath(state, *attrPaths.begin(), *emptyArgs, *aPackages->value); state.forceValue(*v); @@ -250,7 +250,7 @@ struct InstallableFlake : InstallableValue // As a temporary hack until Nixpkgs is properly converted // to provide a clean 'packages' set, look in 'legacyPackages'. - if (auto aPackages = *vProvides->attrs->get(state.symbols.create("legacyPackages"))) { + if (auto aPackages = *vOutputs->attrs->get(state.symbols.create("legacyPackages"))) { try { auto * v = findAlongAttrPath(state, *attrPaths.begin(), *emptyArgs, *aPackages->value); state.forceValue(*v); @@ -260,10 +260,10 @@ struct InstallableFlake : InstallableValue } } - // Otherwise, look for it in 'provides'. + // Otherwise, look for it in 'outputs'. for (auto & attrPath : attrPaths) { try { - auto * v = findAlongAttrPath(state, attrPath, *emptyArgs, *vProvides); + auto * v = findAlongAttrPath(state, attrPath, *emptyArgs, *vOutputs); state.forceValue(*v); return v; } catch (AttrPathNotFound & e) { diff --git a/tests/flakes.sh b/tests/flakes.sh index d95d34c76..377f93c8e 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -33,7 +33,7 @@ cat > $flake1Dir/flake.nix < $flake2Dir/flake.nix < $flake3Dir/flake.nix < $flake3Dir/flake.nix < $flake3Dir/flake.nix < $flake3Dir/flake.nix < Date: Fri, 31 May 2019 18:48:28 +0200 Subject: [PATCH 176/634] Show hash mismatch warnings in SRI format --- src/libstore/build.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/build.cc b/src/libstore/build.cc index 004be8010..a69592219 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -3170,7 +3170,7 @@ void DerivationGoal::registerOutputs() valid. */ delayedException = std::make_exception_ptr( BuildError("hash mismatch in fixed-output derivation '%s':\n wanted: %s\n got: %s", - dest, h.to_string(), h2.to_string())); + dest, h.to_string(SRI), h2.to_string(SRI))); Path actualDest = worker.store.toRealPath(dest); From b971e406dee82486d53737a928b0bb3b482a6c49 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 31 May 2019 19:01:11 +0200 Subject: [PATCH 177/634] Support 'dir' and other parameters in path flakerefs --- src/libexpr/primops/flakeref.cc | 90 ++++++++++++++++++--------------- 1 file changed, 49 insertions(+), 41 deletions(-) diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index 3c805eff8..306da20fe 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -1,4 +1,5 @@ #include "flakeref.hh" +#include "store-api.hh" #include @@ -30,10 +31,6 @@ const static std::string schemeRegex = "(?:http|https|ssh|git|file)"; const static std::string authorityRegex = "[a-zA-Z0-9._~-]*"; const static std::string segmentRegex = "[a-zA-Z0-9._~-]+"; const static std::string pathRegex = "/?" + segmentRegex + "(?:/" + segmentRegex + ")*"; -// FIXME: support escaping in query string. -// Note: '/' is not a valid query parameter, but so what... 
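// (Illustration, assuming the behaviour of splitUriAndParams(), presumably
// provided by the newly included store-api.hh: a reference such as
//   git://example.org/repo.git?ref=master&dir=sub
// is expected to split into the bare URI "git://example.org/repo.git" plus a
// parameter map { "ref": "master", "dir": "sub" }, which the
// handleGitParams/handleSubdir lambdas below then validate.)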
-const static std::string paramRegex = "[a-z]+=[/a-zA-Z0-9._-]*"; -const static std::string paramsRegex = "(?:[?](" + paramRegex + "(?:&" + paramRegex + ")*))"; // 'dir' path elements cannot start with a '.'. We also reject // potentially dangerous characters like ';'. @@ -41,7 +38,7 @@ const static std::string subDirElemRegex = "(?:[a-zA-Z0-9_-]+[a-zA-Z0-9._-]*)"; const static std::string subDirRegex = subDirElemRegex + "(?:/" + subDirElemRegex + ")*"; -FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) +FlakeRef::FlakeRef(const std::string & uri_, bool allowRelative) { // FIXME: could combine this into one regex. @@ -50,21 +47,46 @@ FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) std::regex::ECMAScript); static std::regex githubRegex( - "github:(" + ownerRegex + ")/(" + repoRegex + ")(?:/" + revOrRefRegex + ")?" - + paramsRegex + "?", + "github:(" + ownerRegex + ")/(" + repoRegex + ")(?:/" + revOrRefRegex + ")?", std::regex::ECMAScript); static std::regex uriRegex( "((" + schemeRegex + "):" + "(?://(" + authorityRegex + "))?" + - "(" + pathRegex + "))" + - paramsRegex + "?", + "(" + pathRegex + "))", std::regex::ECMAScript); static std::regex refRegex2(refRegex, std::regex::ECMAScript); static std::regex subDirRegex2(subDirRegex, std::regex::ECMAScript); + auto [uri, params] = splitUriAndParams(uri_); + + auto handleSubdir = [&](const std::string & name, const std::string & value) { + if (name == "dir") { + if (value != "" && !std::regex_match(value, subDirRegex2)) + throw Error("flake '%s' has invalid subdirectory '%s'", uri, value); + subdir = value; + return true; + } else + return false; + }; + + auto handleGitParams = [&](const std::string & name, const std::string & value) { + if (name == "rev") { + if (!std::regex_match(value, revRegex)) + throw Error("invalid Git revision '%s'", value); + rev = Hash(value, htSHA1); + } else if (name == "ref") { + if (!std::regex_match(value, refRegex2)) + throw Error("invalid Git ref '%s'", value); + ref = value; + } else if (handleSubdir(name, value)) + ; + else return false; + return true; + }; + std::cmatch match; if (std::regex_match(uri.c_str(), match, flakeRegex)) { IsAlias d; @@ -88,17 +110,11 @@ FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) else if (match[4].matched) { ref = match[4]; } - for (auto & param : tokenizeString(match[5], "&")) { - auto n = param.find('='); - assert(n != param.npos); - std::string name(param, 0, n); - std::string value(param, n + 1); - if (name == "dir") { - if (value != "" && !std::regex_match(value, subDirRegex2)) - throw Error("flake '%s' has invalid subdirectory '%s'", uri, value); - subdir = value; - } else - throw Error("invalid Git flake reference parameter '%s', in '%s'", name, uri); + for (auto & param : params) { + if (handleSubdir(param.first, param.second)) + ; + else + throw Error("invalid Git flakeref parameter '%s', in '%s'", param.first, uri); } data = d; } @@ -108,26 +124,12 @@ FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) { IsGit d; d.uri = match[1]; - for (auto & param : tokenizeString(match[5], "&")) { - auto n = param.find('='); - assert(n != param.npos); - std::string name(param, 0, n); - std::string value(param, n + 1); - if (name == "rev") { - if (!std::regex_match(value, revRegex)) - throw Error("invalid Git revision '%s'", value); - rev = Hash(value, htSHA1); - } else if (name == "ref") { - if (!std::regex_match(value, refRegex2)) - throw Error("invalid Git ref '%s'", value); - ref = value; - } else if (name == "dir") { - 
if (value != "" && !std::regex_match(value, subDirRegex2)) - throw Error("flake '%s' has invalid subdirectory '%s'", uri, value); - subdir = value; - } else + for (auto & param : params) { + if (handleGitParams(param.first, param.second)) + ; + else // FIXME: should probably pass through unknown parameters - throw Error("invalid Git flake reference parameter '%s', in '%s'", name, uri); + throw Error("invalid Git flakeref parameter '%s', in '%s'", param.first, uri); } if (rev && !ref) throw Error("flake URI '%s' lacks a Git ref", uri); @@ -138,6 +140,12 @@ FlakeRef::FlakeRef(const std::string & uri, bool allowRelative) IsPath d; d.path = allowRelative ? absPath(uri) : canonPath(uri); data = d; + for (auto & param : params) { + if (handleGitParams(param.first, param.second)) + ; + else + throw Error("invalid Git flakeref parameter '%s', in '%s'", param.first, uri); + } } else @@ -165,10 +173,10 @@ std::string FlakeRef::to_string() const } else if (auto refData = std::get_if(&data)) { - assert(subdir == ""); + string = refData->path; if (ref) addParam("ref", *ref); if (rev) addParam("rev", rev->gitRev()); - return refData->path; + if (subdir != "") addParam("dir", subdir); } else if (auto refData = std::get_if(&data)) { From 9169046e64cbffd85a445ebe4313b172a6681646 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 31 May 2019 20:10:56 +0200 Subject: [PATCH 178/634] Add operator << for LockFile Useful for debugging. --- src/libexpr/primops/flake.cc | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index fdbdc83bc..af7d51834 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -121,7 +121,7 @@ nlohmann::json flakeEntryToJson(const LockFile::FlakeEntry & entry) return json; } -void writeLockFile(const LockFile & lockFile, const Path & path) +std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile) { nlohmann::json json; json["version"] = 1; @@ -133,8 +133,14 @@ void writeLockFile(const LockFile & lockFile, const Path & path) json["inputs"] = nlohmann::json::object(); for (auto & x : lockFile.flakeEntries) json["inputs"][x.first.to_string()] = flakeEntryToJson(x.second); + stream << json.dump(4); // '4' = indentation in json file + return stream; +} + +void writeLockFile(const LockFile & lockFile, const Path & path) +{ createDirs(dirOf(path)); - writeFile(path, json.dump(4) + "\n"); // '4' = indentation in json file + writeFile(path, fmt("%s\n", lockFile)); } Path getUserRegistryPath() From 7adb10d29b0041a93d1afeec197bf9af6e8b25b5 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 31 May 2019 20:12:59 +0200 Subject: [PATCH 179/634] Fix reading the lockfile of a flake in a subdirectory --- src/libexpr/primops/flake.cc | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index af7d51834..235e10922 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -482,9 +482,12 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLoc Flake flake = getFlake(state, topRef, allowedToUseRegistries(handleLockFile, true)); LockFile oldLockFile; - if (!recreateLockFile (handleLockFile)) { + if (!recreateLockFile(handleLockFile)) { // If recreateLockFile, start with an empty lockfile - oldLockFile = readLockFile(flake.sourceInfo.storePath + "/flake.lock"); // FIXME: symlink attack + // FIXME: symlink attack + oldLockFile = readLockFile( + 
state.store->toRealPath(flake.sourceInfo.storePath) + + "/" + flake.sourceInfo.resolvedRef.subdir + "/flake.lock"); } LockFile lockFile(oldLockFile); From ccb1bad612e060fc4397d340edc64d18231744b6 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 31 May 2019 20:53:23 +0200 Subject: [PATCH 180/634] Allow bare flakerefs as installables So now $ nix build blender-bin works and builds the default package from that flake. You don't need to add a colon at the end anymore. --- src/libexpr/primops/flakeref.cc | 31 ++++++++++++++++++++++--------- src/libexpr/primops/flakeref.hh | 5 +++++ src/nix/installables.cc | 4 ++++ tests/flakes.sh | 2 +- 4 files changed, 32 insertions(+), 10 deletions(-) diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index 306da20fe..4b6922295 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -65,7 +65,7 @@ FlakeRef::FlakeRef(const std::string & uri_, bool allowRelative) auto handleSubdir = [&](const std::string & name, const std::string & value) { if (name == "dir") { if (value != "" && !std::regex_match(value, subDirRegex2)) - throw Error("flake '%s' has invalid subdirectory '%s'", uri, value); + throw BadFlakeRef("flake '%s' has invalid subdirectory '%s'", uri, value); subdir = value; return true; } else @@ -75,11 +75,11 @@ FlakeRef::FlakeRef(const std::string & uri_, bool allowRelative) auto handleGitParams = [&](const std::string & name, const std::string & value) { if (name == "rev") { if (!std::regex_match(value, revRegex)) - throw Error("invalid Git revision '%s'", value); + throw BadFlakeRef("invalid Git revision '%s'", value); rev = Hash(value, htSHA1); } else if (name == "ref") { if (!std::regex_match(value, refRegex2)) - throw Error("invalid Git ref '%s'", value); + throw BadFlakeRef("invalid Git ref '%s'", value); ref = value; } else if (handleSubdir(name, value)) ; @@ -114,7 +114,7 @@ FlakeRef::FlakeRef(const std::string & uri_, bool allowRelative) if (handleSubdir(param.first, param.second)) ; else - throw Error("invalid Git flakeref parameter '%s', in '%s'", param.first, uri); + throw BadFlakeRef("invalid Git flakeref parameter '%s', in '%s'", param.first, uri); } data = d; } @@ -129,14 +129,16 @@ FlakeRef::FlakeRef(const std::string & uri_, bool allowRelative) ; else // FIXME: should probably pass through unknown parameters - throw Error("invalid Git flakeref parameter '%s', in '%s'", param.first, uri); + throw BadFlakeRef("invalid Git flakeref parameter '%s', in '%s'", param.first, uri); } if (rev && !ref) - throw Error("flake URI '%s' lacks a Git ref", uri); + throw BadFlakeRef("flake URI '%s' lacks a Git ref", uri); data = d; } - else if (hasPrefix(uri, "/") || (allowRelative && (hasPrefix(uri, "./") || hasPrefix(uri, "../") || uri == "."))) { + else if ((hasPrefix(uri, "/") || (allowRelative && (hasPrefix(uri, "./") || hasPrefix(uri, "../") || uri == "."))) + && uri.find(':') == std::string::npos) + { IsPath d; d.path = allowRelative ? 
absPath(uri) : canonPath(uri); data = d; @@ -144,12 +146,12 @@ FlakeRef::FlakeRef(const std::string & uri_, bool allowRelative) if (handleGitParams(param.first, param.second)) ; else - throw Error("invalid Git flakeref parameter '%s', in '%s'", param.first, uri); + throw BadFlakeRef("invalid Git flakeref parameter '%s', in '%s'", param.first, uri); } } else - throw Error("'%s' is not a valid flake reference", uri); + throw BadFlakeRef("'%s' is not a valid flake reference", uri); } std::string FlakeRef::to_string() const @@ -225,4 +227,15 @@ FlakeRef FlakeRef::baseRef() const // Removes the ref and rev from a FlakeRef. result.rev = std::nullopt; return result; } + +std::optional parseFlakeRef( + const std::string & uri, bool allowRelative) +{ + try { + return FlakeRef(uri, allowRelative); + } catch (BadFlakeRef & e) { + return {}; + } +} + } diff --git a/src/libexpr/primops/flakeref.hh b/src/libexpr/primops/flakeref.hh index 299094634..52bb82ddb 100644 --- a/src/libexpr/primops/flakeref.hh +++ b/src/libexpr/primops/flakeref.hh @@ -180,4 +180,9 @@ struct FlakeRef std::ostream & operator << (std::ostream & str, const FlakeRef & flakeRef); +MakeError(BadFlakeRef, Error); + +std::optional parseFlakeRef( + const std::string & uri, bool allowRelative = false); + } diff --git a/src/nix/installables.cc b/src/nix/installables.cc index fe89a6bb4..40248eb5d 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -332,6 +332,10 @@ std::vector> SourceExprCommand::parseInstallables( getDefaultFlakeAttrPaths())); } + else if (auto flakeRef = parseFlakeRef(s, true)) + result.push_back(std::make_shared(*this, s, + getDefaultFlakeAttrPaths())); + else result.push_back(std::make_shared(*this, FlakeRef("nixpkgs"), s)); } diff --git a/tests/flakes.sh b/tests/flakes.sh index 377f93c8e..8b9cb7260 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -131,7 +131,7 @@ nix build -o $TEST_ROOT/result --flake-registry $registry flake1:foo [[ -e $TEST_ROOT/result/hello ]] # Test defaultPackage. -nix build -o $TEST_ROOT/result --flake-registry $registry flake1: +nix build -o $TEST_ROOT/result --flake-registry $registry flake1 [[ -e $TEST_ROOT/result/hello ]] # Building a flake with an unlocked dependency should fail in pure mode. From 8abb8647a33c3516026cd8a2954d34633377b23c Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 31 May 2019 21:52:02 +0200 Subject: [PATCH 181/634] Automatically determine subdir for path flakes This means that in a flake in a subdirectory of a Git repo, you can now do $ nix build rather than the inconvenient $ nix build ../..?dir=foo/bar --- src/libexpr/primops/flakeref.cc | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index 4b6922295..6c90c3b64 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -140,7 +140,17 @@ FlakeRef::FlakeRef(const std::string & uri_, bool allowRelative) && uri.find(':') == std::string::npos) { IsPath d; - d.path = allowRelative ? absPath(uri) : canonPath(uri); + if (allowRelative) { + d.path = absPath(uri); + while (true) { + if (pathExists(d.path + "/.git")) break; + subdir = baseNameOf(d.path) + (subdir.empty() ? 
"" : "/" + subdir); + d.path = dirOf(d.path); + if (d.path == "/") + throw BadFlakeRef("path '%s' does not reference a Git repository", uri); + } + } else + d.path = canonPath(uri); data = d; for (auto & param : params) { if (handleGitParams(param.first, param.second)) From 8cb3bbd5044b8fbfc65f13455d1619a78ccf33a5 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 31 May 2019 22:17:39 +0200 Subject: [PATCH 182/634] Fix handling of bare flakerefs containing a colon --- src/nix/installables.cc | 8 ++++---- tests/flakes.sh | 3 +++ 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 40248eb5d..38ae416e3 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -314,6 +314,10 @@ std::vector> SourceExprCommand::parseInstallables( Strings{"packages." + std::string(s, 8)})); } + else if (auto flakeRef = parseFlakeRef(s, true)) + result.push_back(std::make_shared(*this, s, + getDefaultFlakeAttrPaths())); + else if ((colon = s.rfind(':')) != std::string::npos) { auto flakeRef = std::string(s, 0, colon); auto attrPath = std::string(s, colon + 1); @@ -332,10 +336,6 @@ std::vector> SourceExprCommand::parseInstallables( getDefaultFlakeAttrPaths())); } - else if (auto flakeRef = parseFlakeRef(s, true)) - result.push_back(std::make_shared(*this, s, - getDefaultFlakeAttrPaths())); - else result.push_back(std::make_shared(*this, FlakeRef("nixpkgs"), s)); } diff --git a/tests/flakes.sh b/tests/flakes.sh index 8b9cb7260..c4dd8c333 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -134,6 +134,9 @@ nix build -o $TEST_ROOT/result --flake-registry $registry flake1:foo nix build -o $TEST_ROOT/result --flake-registry $registry flake1 [[ -e $TEST_ROOT/result/hello ]] +nix build -o $TEST_ROOT/result --flake-registry $registry $flake1Dir +nix build -o $TEST_ROOT/result --flake-registry $registry file://$flake1Dir + # Building a flake with an unlocked dependency should fail in pure mode. (! 
nix eval "(builtins.getFlake "$flake2Dir")") From 15f241775ace2bbd807e7222e822bd5bf0f42ff7 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 31 May 2019 23:21:53 +0200 Subject: [PATCH 183/634] Doh --- src/nix/installables.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 38ae416e3..eb3c27d6b 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -315,7 +315,7 @@ std::vector> SourceExprCommand::parseInstallables( } else if (auto flakeRef = parseFlakeRef(s, true)) - result.push_back(std::make_shared(*this, s, + result.push_back(std::make_shared(*this, std::move(*flakeRef), getDefaultFlakeAttrPaths())); else if ((colon = s.rfind(':')) != std::string::npos) { From fb692e5f7b34def8cf590298036ab63e40747062 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 31 May 2019 23:44:42 +0200 Subject: [PATCH 184/634] Bindings: Add convenience method for requiring an attribute --- src/libexpr/attr-set.hh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/libexpr/attr-set.hh b/src/libexpr/attr-set.hh index 6c5fb21ad..d6af99912 100644 --- a/src/libexpr/attr-set.hh +++ b/src/libexpr/attr-set.hh @@ -72,6 +72,14 @@ public: return {}; } + Attr & need(const Symbol & name, const Pos & pos = noPos) + { + auto a = get(name); + if (!a) + throw Error("attribute '%s' missing, at %s", name, pos); + return **a; + } + iterator begin() { return &attrs[0]; } iterator end() { return &attrs[size_]; } From 5fbd9fee0b4b26cc7bcceb350e56e808c7a70e8c Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 31 May 2019 23:45:13 +0200 Subject: [PATCH 185/634] Add 'nix app' command This is like 'nix run', except that the command to execute is defined in a flake output, e.g. defaultApp = { type = "app"; program = "${packages.blender_2_80}/bin/blender"; }; Thus you can do $ nix app blender-bin to start Blender from the 'blender-bin' flake. In the future, we can extend this with sandboxing. (For example we would want to be able to specify that Blender should not have network access by default and should only have access to certain paths in the user's home directory.) --- src/nix/command.hh | 9 ++++ src/nix/installables.cc | 22 ++++++++ src/nix/run.cc | 115 +++++++++++++++++++++++++++++----------- 3 files changed, 116 insertions(+), 30 deletions(-) diff --git a/src/nix/command.hh b/src/nix/command.hh index 26c308331..659b724c3 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -38,6 +38,13 @@ struct Buildable typedef std::vector Buildables; +struct App +{ + PathSet context; + Path program; + // FIXME: add args, sandbox settings, metadata, ... 
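    // (Illustration: an App is produced by Installable::toApp() from a flake
    // output of the shape
    //   defaultApp = { type = "app"; program = "${packages.blender_2_80}/bin/blender"; };
    // where 'type' must be "app" and 'program' must resolve to a path inside
    // the Nix store.)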
+}; + struct Installable { virtual std::string what() = 0; @@ -49,6 +56,8 @@ struct Installable Buildable toBuildable(); + App toApp(EvalState & state); + virtual Value * toValue(EvalState & state) { throw Error("argument '%s' cannot be evaluated", what()); diff --git a/src/nix/installables.cc b/src/nix/installables.cc index eb3c27d6b..b6f05b314 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -68,6 +68,28 @@ Buildable Installable::toBuildable() return std::move(buildables[0]); } +App Installable::toApp(EvalState & state) +{ + auto v = toValue(state); + + state.forceAttrs(*v); + + auto aType = v->attrs->need(state.sType); + if (state.forceStringNoCtx(*aType.value, *aType.pos) != "app") + throw Error("value does not have type 'app', at %s", *aType.pos); + + App app; + + auto aProgram = v->attrs->need(state.symbols.create("program")); + app.program = state.forceString(*aProgram.value, app.context, *aProgram.pos); + + // FIXME: check that 'program' is in the closure of 'context'. + if (!state.store->isInStore(app.program)) + throw Error("app program '%s' is not in the Nix store", app.program); + + return app; +} + struct InstallableStorePath : Installable { Path storePath; diff --git a/src/nix/run.cc b/src/nix/run.cc index 35b763345..00a682832 100644 --- a/src/nix/run.cc +++ b/src/nix/run.cc @@ -8,6 +8,7 @@ #include "fs-accessor.hh" #include "progress-bar.hh" #include "affinity.hh" +#include "eval.hh" #if __linux__ #include @@ -19,7 +20,44 @@ using namespace nix; std::string chrootHelperName = "__run_in_chroot"; -struct CmdRun : InstallablesCommand +struct RunCommon : virtual Command +{ + void runProgram(ref store, + const std::string & program, + const Strings & args) + { + stopProgressBar(); + + restoreSignals(); + + restoreAffinity(); + + /* If this is a diverted store (i.e. its "logical" location + (typically /nix/store) differs from its "physical" location + (e.g. /home/eelco/nix/store), then run the command in a + chroot. For non-root users, this requires running it in new + mount and user namespaces. Unfortunately, + unshare(CLONE_NEWUSER) doesn't work in a multithreaded + program (which "nix" is), so we exec() a single-threaded + helper program (chrootHelper() below) to do the work. */ + auto store2 = store.dynamic_pointer_cast(); + + if (store2 && store->storeDir != store2->realStoreDir) { + Strings helperArgs = { chrootHelperName, store->storeDir, store2->realStoreDir, program }; + for (auto & arg : args) helperArgs.push_back(arg); + + execv(readLink("/proc/self/exe").c_str(), stringsToCharPtrs(helperArgs).data()); + + throw SysError("could not execute chroot helper"); + } + + execvp(program.c_str(), stringsToCharPtrs(args).data()); + + throw SysError("unable to execute '%s'", program); + } +}; + +struct CmdRun : InstallablesCommand, RunCommon { std::vector command = { "bash" }; StringSet keep, unset; @@ -147,43 +185,60 @@ struct CmdRun : InstallablesCommand setenv("PATH", concatStringsSep(":", unixPath).c_str(), 1); - std::string cmd = *command.begin(); Strings args; for (auto & arg : command) args.push_back(arg); - stopProgressBar(); - - restoreSignals(); - - restoreAffinity(); - - /* If this is a diverted store (i.e. its "logical" location - (typically /nix/store) differs from its "physical" location - (e.g. /home/eelco/nix/store), then run the command in a - chroot. For non-root users, this requires running it in new - mount and user namespaces. 
Unfortunately, - unshare(CLONE_NEWUSER) doesn't work in a multithreaded - program (which "nix" is), so we exec() a single-threaded - helper program (chrootHelper() below) to do the work. */ - auto store2 = store.dynamic_pointer_cast(); - - if (store2 && store->storeDir != store2->realStoreDir) { - Strings helperArgs = { chrootHelperName, store->storeDir, store2->realStoreDir, cmd }; - for (auto & arg : args) helperArgs.push_back(arg); - - execv(readLink("/proc/self/exe").c_str(), stringsToCharPtrs(helperArgs).data()); - - throw SysError("could not execute chroot helper"); - } - - execvp(cmd.c_str(), stringsToCharPtrs(args).data()); - - throw SysError("unable to exec '%s'", cmd); + runProgram(store, *command.begin(), args); } }; static RegisterCommand r1(make_ref()); +struct CmdApp : InstallableCommand, RunCommon +{ + CmdApp() + { + } + + std::string name() override + { + return "app"; + } + + std::string description() override + { + return "run a Nix application"; + } + + Examples examples() override + { + return { + Example{ + "To run Blender:", + "nix app blender-bin" + }, + }; + } + + Strings getDefaultFlakeAttrPaths() override + { + return {"defaultApp"}; + } + + void run(ref store) override + { + auto state = getEvalState(); + + auto app = installable->toApp(*state); + + state->realiseContext(app.context); + + runProgram(store, app.program, {app.program}); + } +}; + +static RegisterCommand r2(make_ref()); + void chrootHelper(int argc, char * * argv) { int p = 1; From d9a6a75ed28c590dde2dba846e356cbcda38d977 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Mon, 3 Jun 2019 14:47:47 +0200 Subject: [PATCH 186/634] Made epochs more fine-grained Fixes #2894 --- doc/flakes/design.md | 4 ++-- flake.nix | 2 +- src/libexpr/primops/flake.cc | 2 +- src/nix/flake-template.nix | 2 +- tests/flakes.sh | 12 ++++++------ 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/doc/flakes/design.md b/doc/flakes/design.md index 4f6524f83..63198e577 100644 --- a/doc/flakes/design.md +++ b/doc/flakes/design.md @@ -96,7 +96,7 @@ module. # The epoch may be used in the future to determine how Nix # expressions inside this flake are to be parsed. - epoch = 2018; + epoch = 201906; # Some other metadata. 
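  # (Illustration: the epoch now appears to be a year-month stamp, e.g.
  # 201906 for June 2019, rather than a bare year; Nix rejects flakes whose
  # epoch is newer than the ones it supports.)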
description = "A filesystem that fetches DWARF debug info from the Internet on demand"; @@ -164,7 +164,7 @@ Similarly, a minimal `flake.nix` for Nixpkgs: { name = "nixpkgs"; - epoch = 2018; + epoch = 201906; description = "A collection of packages for the Nix package manager"; diff --git a/flake.nix b/flake.nix index ab96d5c90..e7deb2de3 100644 --- a/flake.nix +++ b/flake.nix @@ -3,7 +3,7 @@ description = "The purely functional package manager"; - epoch = 2019; + epoch = 201906; inputs = [ "nixpkgs" ]; diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 235e10922..e5035c53a 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -312,7 +312,7 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe if (auto epoch = vInfo.attrs->get(sEpoch)) { flake.epoch = state.forceInt(*(**epoch).value, *(**epoch).pos); - if (flake.epoch > 2019) + if (flake.epoch > 201906) throw Error("flake '%s' requires unsupported epoch %d; please upgrade Nix", flakeRef, flake.epoch); } else throw Error("flake '%s' lacks attribute 'epoch'", flakeRef); diff --git a/src/nix/flake-template.nix b/src/nix/flake-template.nix index fe89e647e..bec613f6c 100644 --- a/src/nix/flake-template.nix +++ b/src/nix/flake-template.nix @@ -3,7 +3,7 @@ description = "A flake for building Hello World"; - epoch = 2019; + epoch = 201906; requires = [ "nixpkgs" ]; diff --git a/tests/flakes.sh b/tests/flakes.sh index c4dd8c333..998abfd09 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -29,7 +29,7 @@ cat > $flake1Dir/flake.nix < $flake2Dir/flake.nix < $flake3Dir/flake.nix < $flake3Dir/flake.nix <&1 | grep 'unsupported epoch' # Test whether registry caching works. @@ -219,7 +219,7 @@ cat > $flake3Dir/flake.nix < Date: Mon, 3 Jun 2019 21:51:06 +0200 Subject: [PATCH 187/634] Fix clang compilation error https://hydra.nixos.org/build/94332344 https://stackoverflow.com/questions/46114214/lambda-implicit-capture-fails-with-variable-declared-from-structured-binding --- src/libexpr/primops/flakeref.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/primops/flakeref.cc index 6c90c3b64..24af09124 100644 --- a/src/libexpr/primops/flakeref.cc +++ b/src/libexpr/primops/flakeref.cc @@ -60,7 +60,8 @@ FlakeRef::FlakeRef(const std::string & uri_, bool allowRelative) static std::regex subDirRegex2(subDirRegex, std::regex::ECMAScript); - auto [uri, params] = splitUriAndParams(uri_); + auto [uri2, params] = splitUriAndParams(uri_); + std::string uri(uri2); auto handleSubdir = [&](const std::string & name, const std::string & value) { if (name == "dir") { From c7c562416c75ed60e024f84e4ac440e29b98e0e3 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 4 Jun 2019 13:44:12 +0200 Subject: [PATCH 188/634] shell.nix: Use clang by default Clang compiles faster (121s vs 156s for GCC 7) so it's a bit nicer for development. --- shell.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shell.nix b/shell.nix index d7e63bad3..dd2950b29 100644 --- a/shell.nix +++ b/shell.nix @@ -1,4 +1,4 @@ -{ useClang ? false +{ useClang ? true , nixpkgs ? builtins.fetchTarball https://github.com/NixOS/nixpkgs-channels/archive/nixos-19.03.tar.gz }: From 6dbd5c26e6c853f302cd9d3ed171d134ff24ffe1 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 4 Jun 2019 19:10:35 +0200 Subject: [PATCH 189/634] Make flake input fetching lazy As long as the flake input is locked, it is now only fetched when it is evaluated (e.g. 
"nixpkgs" is fetched when "inputs.nixpkgs." is evaluated). This required adding an "id" attribute to the members of "inputs" in lockfiles, e.g. "inputs": { "nixpkgs/release-19.03": { "id": "nixpkgs", "inputs": {}, "narHash": "sha256-eYtxncIMFVmOHaHBtTdPGcs/AnJqKqA6tHCm0UmPYQU=", "nonFlakeInputs": {}, "uri": "github:edolstra/nixpkgs/e9d5882bb861dc48f8d46960e7c820efdbe8f9c1" } } because the flake ID needs to be known beforehand to construct the "inputs" attrset. Fixes #2913. --- src/libexpr/primops/flake.cc | 292 +++++++++++++++++++---------------- src/libexpr/primops/flake.hh | 138 +++++++++++------ src/nix/flake.cc | 6 +- src/nix/installables.cc | 2 + src/nlohmann/json_fwd.hpp | 10 ++ tests/flakes.sh | 11 ++ 6 files changed, 272 insertions(+), 187 deletions(-) create mode 100644 src/nlohmann/json_fwd.hpp diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index e5035c53a..189663e51 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -43,97 +43,101 @@ std::shared_ptr readRegistry(const Path & path) void writeRegistry(const FlakeRegistry & registry, const Path & path) { nlohmann::json json; - json["version"] = 1; + json["version"] = 2; for (auto elem : registry.entries) json["flakes"][elem.first.to_string()] = { {"uri", elem.second.to_string()} }; createDirs(dirOf(path)); writeFile(path, json.dump(4)); // The '4' is the number of spaces used in the indentation in the json file. } -LockFile::FlakeEntry readFlakeEntry(nlohmann::json json) +NonFlakeDep::NonFlakeDep(const nlohmann::json & json) + : ref(json["uri"]) + , narHash(Hash((std::string) json["narHash"])) { - FlakeRef flakeRef(json["uri"]); - if (!flakeRef.isImmutable()) - throw Error("cannot use mutable flake '%s' in pure mode", flakeRef); + if (!ref.isImmutable()) + throw Error("lockfile contains mutable flakeref '%s'", ref); +} - LockFile::FlakeEntry entry(flakeRef, Hash((std::string) json["narHash"])); +nlohmann::json NonFlakeDep::toJson() const +{ + nlohmann::json json; + json["uri"] = ref.to_string(); + json["narHash"] = narHash.to_string(SRI); + return json; +} +FlakeDep::FlakeDep(const nlohmann::json & json) + : FlakeInputs(json) + , id(json["id"]) + , ref(json["uri"]) + , narHash(Hash((std::string) json["narHash"])) +{ + if (!ref.isImmutable()) + throw Error("lockfile contains mutable flakeref '%s'", ref); +} + +nlohmann::json FlakeDep::toJson() const +{ + auto json = FlakeInputs::toJson(); + json["id"] = id; + json["uri"] = ref.to_string(); + json["narHash"] = narHash.to_string(SRI); + return json; +} + +FlakeInputs::FlakeInputs(const nlohmann::json & json) +{ auto nonFlakeInputs = json["nonFlakeInputs"]; - - for (auto i = nonFlakeInputs.begin(); i != nonFlakeInputs.end(); ++i) { - FlakeRef flakeRef(i->value("uri", "")); - if (!flakeRef.isImmutable()) - throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef); - LockFile::NonFlakeEntry nonEntry(flakeRef, Hash(i->value("narHash", ""))); - entry.nonFlakeEntries.insert_or_assign(i.key(), nonEntry); - } + for (auto i = nonFlakeInputs.begin(); i != nonFlakeInputs.end(); ++i) + nonFlakeDeps.insert_or_assign(i.key(), NonFlakeDep(*i)); auto inputs = json["inputs"]; - for (auto i = inputs.begin(); i != inputs.end(); ++i) - entry.flakeEntries.insert_or_assign(i.key(), readFlakeEntry(*i)); + flakeDeps.insert_or_assign(i.key(), FlakeDep(*i)); +} - return entry; +nlohmann::json FlakeInputs::toJson() const +{ + nlohmann::json json; + { + auto j = nlohmann::json::object(); + for (auto & i : nonFlakeDeps) + j[i.first] = 
i.second.toJson(); + json["nonFlakeInputs"] = std::move(j); + } + { + auto j = nlohmann::json::object(); + for (auto & i : flakeDeps) + j[i.first.to_string()] = i.second.toJson(); + json["inputs"] = std::move(j); + } + return json; +} + +nlohmann::json LockFile::toJson() const +{ + auto json = FlakeInputs::toJson(); + json["version"] = 2; + return json; } LockFile readLockFile(const Path & path) { - LockFile lockFile; + if (pathExists(path)) { + auto json = nlohmann::json::parse(readFile(path)); - if (!pathExists(path)) - return lockFile; + auto version = json.value("version", 0); + if (version != 2) + throw Error("lock file '%s' has unsupported version %d", path, version); - auto json = nlohmann::json::parse(readFile(path)); - - auto version = json.value("version", 0); - if (version != 1) - throw Error("lock file '%s' has unsupported version %d", path, version); - - auto nonFlakeInputs = json["nonFlakeInputs"]; - - for (auto i = nonFlakeInputs.begin(); i != nonFlakeInputs.end(); ++i) { - FlakeRef flakeRef(i->value("uri", "")); - LockFile::NonFlakeEntry nonEntry(flakeRef, Hash(i->value("narHash", ""))); - if (!flakeRef.isImmutable()) - throw Error("found mutable FlakeRef '%s' in lockfile at path %s", flakeRef, path); - lockFile.nonFlakeEntries.insert_or_assign(i.key(), nonEntry); - } - - auto inputs = json["inputs"]; - - for (auto i = inputs.begin(); i != inputs.end(); ++i) - lockFile.flakeEntries.insert_or_assign(i.key(), readFlakeEntry(*i)); - - return lockFile; -} - -nlohmann::json flakeEntryToJson(const LockFile::FlakeEntry & entry) -{ - nlohmann::json json; - json["uri"] = entry.ref.to_string(); - json["narHash"] = entry.narHash.to_string(SRI); - for (auto & x : entry.nonFlakeEntries) { - json["nonFlakeInputs"][x.first]["uri"] = x.second.ref.to_string(); - json["nonFlakeInputs"][x.first]["narHash"] = x.second.narHash.to_string(SRI); - } - for (auto & x : entry.flakeEntries) - json["inputs"][x.first.to_string()] = flakeEntryToJson(x.second); - return json; + return LockFile(json); + } else + return LockFile(); } std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile) { - nlohmann::json json; - json["version"] = 1; - json["nonFlakeInputs"] = nlohmann::json::object(); - for (auto & x : lockFile.nonFlakeEntries) { - json["nonFlakeInputs"][x.first]["uri"] = x.second.ref.to_string(); - json["nonFlakeInputs"][x.first]["narHash"] = x.second.narHash.to_string(SRI); - } - json["inputs"] = nlohmann::json::object(); - for (auto & x : lockFile.flakeEntries) - json["inputs"][x.first.to_string()] = flakeEntryToJson(x.second); - stream << json.dump(4); // '4' = indentation in json file + stream << lockFile.toJson().dump(4); // '4' = indentation in json file return stream; } @@ -387,33 +391,6 @@ NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeAlias al return nonFlake; } -LockFile entryToLockFile(const LockFile::FlakeEntry & entry) -{ - LockFile lockFile; - lockFile.flakeEntries = entry.flakeEntries; - lockFile.nonFlakeEntries = entry.nonFlakeEntries; - return lockFile; -} - -LockFile::FlakeEntry dependenciesToFlakeEntry(const ResolvedFlake & resolvedFlake) -{ - LockFile::FlakeEntry entry( - resolvedFlake.flake.sourceInfo.resolvedRef, - resolvedFlake.flake.sourceInfo.narHash); - - for (auto & info : resolvedFlake.flakeDeps) - entry.flakeEntries.insert_or_assign(info.first.to_string(), dependenciesToFlakeEntry(info.second)); - - for (auto & nonFlake : resolvedFlake.nonFlakeDeps) { - LockFile::NonFlakeEntry nonEntry( - nonFlake.sourceInfo.resolvedRef, - 
nonFlake.sourceInfo.narHash); - entry.nonFlakeEntries.insert_or_assign(nonFlake.alias, nonEntry); - } - - return entry; -} - bool allowedToWrite(HandleLockFile handle) { return handle == UpdateLockFile || handle == RecreateLockFile; @@ -435,43 +412,50 @@ bool allowedToUseRegistries(HandleLockFile handle, bool isTopRef) else assert(false); } -ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flakeRef, - HandleLockFile handleLockFile, LockFile lockFile = {}, bool topRef = false) +static std::pair updateLocks( + EvalState & state, + const FlakeRef & flakeRef, + HandleLockFile handleLockFile, + const FlakeInputs & oldEntry, + bool topRef) { - Flake flake = getFlake(state, flakeRef, allowedToUseRegistries(handleLockFile, topRef)); + auto flake = getFlake(state, flakeRef, allowedToUseRegistries(handleLockFile, topRef)); - ResolvedFlake deps(flake); + FlakeDep newEntry( + flake.id, + flake.sourceInfo.resolvedRef, + flake.sourceInfo.narHash); - for (auto & nonFlakeInfo : flake.nonFlakeInputs) { - FlakeRef ref = nonFlakeInfo.second; - auto i = lockFile.nonFlakeEntries.find(nonFlakeInfo.first); - if (i != lockFile.nonFlakeEntries.end()) { - NonFlake nonFlake = getNonFlake(state, i->second.ref, nonFlakeInfo.first); - if (nonFlake.sourceInfo.narHash != i->second.narHash) - throw Error("the content hash of flakeref '%s' doesn't match", i->second.ref.to_string()); - deps.nonFlakeDeps.push_back(nonFlake); + for (auto & input : flake.nonFlakeInputs) { + auto & id = input.first; + auto & ref = input.second; + auto i = oldEntry.nonFlakeDeps.find(id); + if (i != oldEntry.nonFlakeDeps.end()) { + newEntry.nonFlakeDeps.insert_or_assign(i->first, i->second); } else { if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries) - throw Error("cannot update non-flake dependency '%s' in pure mode", nonFlakeInfo.first); - deps.nonFlakeDeps.push_back(getNonFlake(state, nonFlakeInfo.second, nonFlakeInfo.first, allowedToUseRegistries(handleLockFile, false))); + throw Error("cannot update non-flake dependency '%s' in pure mode", id); + auto nonFlake = getNonFlake(state, ref, id, allowedToUseRegistries(handleLockFile, false)); + newEntry.nonFlakeDeps.insert_or_assign(id, + NonFlakeDep( + nonFlake.sourceInfo.resolvedRef, + nonFlake.sourceInfo.narHash)); } } - for (auto newFlakeRef : flake.inputs) { - auto i = lockFile.flakeEntries.find(newFlakeRef); - if (i != lockFile.flakeEntries.end()) { // Propagate lockFile downwards if possible - ResolvedFlake newResFlake = resolveFlakeFromLockFile(state, i->second.ref, handleLockFile, entryToLockFile(i->second)); - if (newResFlake.flake.sourceInfo.narHash != i->second.narHash) - throw Error("the content hash of flakeref '%s' doesn't match", i->second.ref.to_string()); - deps.flakeDeps.insert_or_assign(newFlakeRef, newResFlake); + for (auto & inputRef : flake.inputs) { + auto i = oldEntry.flakeDeps.find(inputRef); + if (i != oldEntry.flakeDeps.end()) { + newEntry.flakeDeps.insert_or_assign(inputRef, i->second); } else { if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries) - throw Error("cannot update flake dependency '%s' in pure mode", newFlakeRef.to_string()); - deps.flakeDeps.insert_or_assign(newFlakeRef, resolveFlakeFromLockFile(state, newFlakeRef, handleLockFile)); + throw Error("cannot update flake dependency '%s' in pure mode", inputRef); + newEntry.flakeDeps.insert_or_assign(inputRef, + updateLocks(state, inputRef, handleLockFile, {}, false).second); } } - return deps; + return {flake, newEntry}; } /* Given a flake 
reference, recursively fetch it and its dependencies. @@ -479,7 +463,8 @@ ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flake */ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLockFile handleLockFile) { - Flake flake = getFlake(state, topRef, allowedToUseRegistries(handleLockFile, true)); + auto flake = getFlake(state, topRef, allowedToUseRegistries(handleLockFile, true)); + LockFile oldLockFile; if (!recreateLockFile(handleLockFile)) { @@ -490,10 +475,9 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLoc + "/" + flake.sourceInfo.resolvedRef.subdir + "/flake.lock"); } - LockFile lockFile(oldLockFile); - - ResolvedFlake resFlake = resolveFlakeFromLockFile(state, topRef, handleLockFile, lockFile, true); - lockFile = entryToLockFile(dependenciesToFlakeEntry(resFlake)); + // FIXME: get rid of duplicate getFlake call + LockFile lockFile(updateLocks( + state, topRef, handleLockFile, oldLockFile, true).second); if (!(lockFile == oldLockFile)) { if (allowedToWrite(handleLockFile)) { @@ -509,7 +493,7 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLoc warn("using updated lockfile without writing it to file"); } - return resFlake; + return ResolvedFlake(std::move(flake), std::move(lockFile)); } void updateLockFile(EvalState & state, const FlakeRef & flakeRef, bool recreateLockFile) @@ -520,7 +504,9 @@ void updateLockFile(EvalState & state, const FlakeRef & flakeRef, bool recreateL static void emitSourceInfoAttrs(EvalState & state, const SourceInfo & sourceInfo, Value & vAttrs) { auto & path = sourceInfo.storePath; - state.store->isValidPath(path); + assert(state.store->isValidPath(path)); + // FIXME: turn into fetchGit etc. + // FIXME: check narHash. mkString(*state.allocAttr(vAttrs, state.sOutPath), path, {path}); if (sourceInfo.resolvedRef.rev) { @@ -539,42 +525,74 @@ static void emitSourceInfoAttrs(EvalState & state, const SourceInfo & sourceInfo std::put_time(std::gmtime(&*sourceInfo.lastModified), "%Y%m%d%H%M%S"))); } -void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v) +/* Helper primop to make callFlake (below) fetch/call its inputs + lazily. Note that this primop cannot be called by user code since + it doesn't appear in 'builtins'. */ +static void prim_callFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + auto lazyFlake = (FlakeDep *) args[0]->attrs; + auto flake = getFlake(state, lazyFlake->ref, false); + callFlake(state, flake, *lazyFlake, v); +} + +void callFlake(EvalState & state, + const Flake & flake, + const FlakeInputs & inputs, + Value & v) { // Construct the resulting attrset '{description, outputs, // ...}'. This attrset is passed lazily as an argument to 'outputs'. 
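    // (Illustration of the laziness introduced by this patch: each flake
    // input becomes an attribute whose value is an application of the
    // internal 'callFlake' primop to its lock-file entry, so e.g. 'nixpkgs'
    // is only fetched once 'inputs.nixpkgs' is actually evaluated.)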
- state.mkAttrs(v, resFlake.flakeDeps.size() + resFlake.nonFlakeDeps.size() + 8); + state.mkAttrs(v, + inputs.flakeDeps.size() + + inputs.nonFlakeDeps.size() + 8); - for (auto info : resFlake.flakeDeps) { - const ResolvedFlake newResFlake = info.second; - auto vFlake = state.allocAttr(v, newResFlake.flake.id); - callFlake(state, newResFlake, *vFlake); + for (auto & dep : inputs.flakeDeps) { + auto vFlake = state.allocAttr(v, dep.second.id); + auto vPrimOp = state.allocValue(); + static auto primOp = new PrimOp(prim_callFlake, 1, state.symbols.create("callFlake")); + vPrimOp->type = tPrimOp; + vPrimOp->primOp = primOp; + auto vArg = state.allocValue(); + vArg->type = tNull; + // FIXME: leak + vArg->attrs = (Bindings *) new FlakeDep(dep.second); // evil! also inefficient + mkApp(*vFlake, *vPrimOp, *vArg); } - for (const NonFlake nonFlake : resFlake.nonFlakeDeps) { - auto vNonFlake = state.allocAttr(v, nonFlake.alias); + for (auto & dep : inputs.nonFlakeDeps) { + auto vNonFlake = state.allocAttr(v, dep.first); state.mkAttrs(*vNonFlake, 8); - state.store->isValidPath(nonFlake.sourceInfo.storePath); + auto nonFlake = getNonFlake(state, dep.second.ref, dep.first); + + assert(state.store->isValidPath(nonFlake.sourceInfo.storePath)); + mkString(*state.allocAttr(*vNonFlake, state.sOutPath), nonFlake.sourceInfo.storePath, {nonFlake.sourceInfo.storePath}); emitSourceInfoAttrs(state, nonFlake.sourceInfo, *vNonFlake); } - mkString(*state.allocAttr(v, state.sDescription), resFlake.flake.description); + mkString(*state.allocAttr(v, state.sDescription), flake.description); - emitSourceInfoAttrs(state, resFlake.flake.sourceInfo, v); + emitSourceInfoAttrs(state, flake.sourceInfo, v); auto vOutputs = state.allocAttr(v, state.symbols.create("outputs")); - mkApp(*vOutputs, *resFlake.flake.vOutputs, v); + mkApp(*vOutputs, *flake.vOutputs, v); v.attrs->push_back(Attr(state.symbols.create("self"), &v)); v.attrs->sort(); } +void callFlake(EvalState & state, + const ResolvedFlake & resFlake, + Value & v) +{ + callFlake(state, resFlake.flake, resFlake.lockFile, v); +} + // This function is exposed to be used in nix files. 
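// (Illustration: from the Nix language this primop is invoked as
//   builtins.getFlake "nixpkgs"
// with a flake reference as its argument; tests/flakes.sh exercises it with
// a local Git checkout path.)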
static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) { diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 82b0973f6..692fa744d 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -2,6 +2,7 @@ #include "flakeref.hh" #include +#include namespace nix { @@ -19,51 +20,12 @@ struct FlakeRegistry std::map entries; }; -struct LockFile -{ - struct NonFlakeEntry - { - FlakeRef ref; - Hash narHash; - NonFlakeEntry(const FlakeRef & flakeRef, const Hash & hash) : ref(flakeRef), narHash(hash) {}; - - bool operator ==(const NonFlakeEntry & other) const - { - return ref == other.ref && narHash == other.narHash; - } - }; - - struct FlakeEntry - { - FlakeRef ref; - Hash narHash; - std::map flakeEntries; - std::map nonFlakeEntries; - FlakeEntry(const FlakeRef & flakeRef, const Hash & hash) : ref(flakeRef), narHash(hash) {}; - - bool operator ==(const FlakeEntry & other) const - { - return - ref == other.ref - && narHash == other.narHash - && flakeEntries == other.flakeEntries - && nonFlakeEntries == other.nonFlakeEntries; - } - }; - - std::map flakeEntries; - std::map nonFlakeEntries; - - bool operator ==(const LockFile & other) const - { - return - flakeEntries == other.flakeEntries - && nonFlakeEntries == other.nonFlakeEntries; - } -}; - typedef std::vector> Registries; +std::shared_ptr readRegistry(const Path &); + +void writeRegistry(const FlakeRegistry &, const Path &); + Path getUserRegistryPath(); enum HandleLockFile : unsigned int @@ -75,9 +37,80 @@ enum HandleLockFile : unsigned int , UseNewLockFile // `RecreateLockFile` without writing to file }; -std::shared_ptr readRegistry(const Path &); +struct NonFlakeDep +{ + FlakeRef ref; + Hash narHash; -void writeRegistry(const FlakeRegistry &, const Path &); + NonFlakeDep(const FlakeRef & flakeRef, const Hash & narHash) + : ref(flakeRef), narHash(narHash) {}; + + NonFlakeDep(const nlohmann::json & json); + + bool operator ==(const NonFlakeDep & other) const + { + return ref == other.ref && narHash == other.narHash; + } + + nlohmann::json toJson() const; +}; + +struct FlakeDep; + +struct FlakeInputs +{ + std::map flakeDeps; + std::map nonFlakeDeps; + + FlakeInputs() {}; + FlakeInputs(const nlohmann::json & json); + + nlohmann::json toJson() const; +}; + +struct FlakeDep : FlakeInputs +{ + FlakeId id; + FlakeRef ref; + Hash narHash; + + FlakeDep(const FlakeId & id, const FlakeRef & flakeRef, const Hash & narHash) + : id(id), ref(flakeRef), narHash(narHash) {}; + + FlakeDep(const nlohmann::json & json); + + bool operator ==(const FlakeDep & other) const + { + return + id == other.id + && ref == other.ref + && narHash == other.narHash + && flakeDeps == other.flakeDeps + && nonFlakeDeps == other.nonFlakeDeps; + } + + nlohmann::json toJson() const; +}; + +struct LockFile : FlakeInputs +{ + bool operator ==(const LockFile & other) const + { + return + flakeDeps == other.flakeDeps + && nonFlakeDeps == other.nonFlakeDeps; + } + + LockFile() {} + LockFile(const nlohmann::json & json) : FlakeInputs(json) {} + LockFile(FlakeDep && dep) + { + flakeDeps = std::move(dep.flakeDeps); + nonFlakeDeps = std::move(dep.nonFlakeDeps); + } + + nlohmann::json toJson() const; +}; struct SourceInfo { @@ -129,14 +162,21 @@ Flake getFlake(EvalState &, const FlakeRef &, bool impureIsAllowed); struct ResolvedFlake { Flake flake; - std::map flakeDeps; // The key in this map, is the originalRef as written in flake.nix - std::vector nonFlakeDeps; - ResolvedFlake(const Flake & flake) : flake(flake) 
{} + LockFile lockFile; + ResolvedFlake(Flake && flake, LockFile && lockFile) + : flake(flake), lockFile(lockFile) {} }; ResolvedFlake resolveFlake(EvalState &, const FlakeRef &, HandleLockFile); -void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v); +void callFlake(EvalState & state, + const Flake & flake, + const FlakeInputs & inputs, + Value & v); + +void callFlake(EvalState & state, + const ResolvedFlake & resFlake, + Value & v); void updateLockFile(EvalState &, const FlakeRef & flakeRef, bool recreateLockFile); diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 8d6716391..d229c7512 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -136,6 +136,7 @@ static nlohmann::json nonFlakeToJson(const NonFlake & nonFlake) return j; } +#if 0 // FIXME: merge info CmdFlakeInfo? struct CmdFlakeDeps : FlakeCommand { @@ -173,6 +174,7 @@ struct CmdFlakeDeps : FlakeCommand } } }; +#endif struct CmdFlakeUpdate : FlakeCommand { @@ -232,6 +234,7 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON if (json) { auto json = flakeToJson(flake); +#if 0 auto state = getEvalState(); auto vFlake = state->allocValue(); @@ -254,6 +257,7 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON }); json["outputs"] = std::move(outputs); +#endif std::cout << json.dump() << std::endl; } else @@ -518,7 +522,7 @@ struct CmdFlake : virtual MultiCommand, virtual Command , make_ref() , make_ref() , make_ref() - , make_ref() + //, make_ref() , make_ref() , make_ref() , make_ref() diff --git a/src/nix/installables.cc b/src/nix/installables.cc index b6f05b314..86b4a9b93 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -190,6 +190,7 @@ void makeFlakeClosureGCRoot(Store & store, const FlakeRef & origFlakeRef, const flake::ResolvedFlake & resFlake) { +#if 0 if (std::get_if(&origFlakeRef.data)) return; /* Get the store paths of all non-local flakes. */ @@ -224,6 +225,7 @@ void makeFlakeClosureGCRoot(Store & store, debug("writing GC root '%s' for flake closure of '%s'", symlink, origFlakeRef); replaceSymlink(closurePath, symlink); store.addIndirectRoot(symlink); +#endif } struct InstallableFlake : InstallableValue diff --git a/src/nlohmann/json_fwd.hpp b/src/nlohmann/json_fwd.hpp new file mode 100644 index 000000000..ae6e4c64f --- /dev/null +++ b/src/nlohmann/json_fwd.hpp @@ -0,0 +1,10 @@ +#pragma once + +namespace nlohmann { + +struct json : basic_json<> +{ + using basic_json<>::basic_json; +}; + +} diff --git a/tests/flakes.sh b/tests/flakes.sh index 998abfd09..29845e5ed 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -191,6 +191,8 @@ nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir:sth # Check whether it saved the lockfile [[ ! (-z $(git -C $flake3Dir diff master)) ]] +git -C $flake3Dir commit -m 'Add lockfile' + # Unsupported epochs should be an error. sed -i $flake3Dir/flake.nix -e s/201906/201909/ nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir:sth 2>&1 | grep 'unsupported epoch' @@ -241,3 +243,12 @@ git -C $flake3Dir commit -m 'Add nonFlakeInputs' # Check whether `nix build` works with a lockfile which is missing a nonFlakeInputs nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir:sth + +# Check whether flake input fetching is lazy: flake3:sth does not +# depend on flake2, so this shouldn't fail. +rm -rf $TEST_HOME/.cache +clearStore +mv $flake2Dir $flake2Dir.tmp +nix build -o $TEST_ROOT/result --flake-registry $registry flake3:sth +(! 
nix build -o $TEST_ROOT/result --flake-registry $registry flake3:xyzzy) +mv $flake2Dir.tmp $flake2Dir From 278114d559109199ff8e6f23b6700ab7909f5320 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 4 Jun 2019 19:45:16 +0200 Subject: [PATCH 190/634] Fix GC closure generation --- src/libexpr/primops/flake.cc | 17 +++++++++-------- src/libexpr/primops/flake.hh | 26 +++++++++++++++++--------- src/nix/installables.cc | 21 +++++++++++++-------- tests/flakes.sh | 1 + 4 files changed, 40 insertions(+), 25 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 189663e51..d0405a377 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -50,7 +50,7 @@ void writeRegistry(const FlakeRegistry & registry, const Path & path) writeFile(path, json.dump(4)); // The '4' is the number of spaces used in the indentation in the json file. } -NonFlakeDep::NonFlakeDep(const nlohmann::json & json) +AbstractDep::AbstractDep(const nlohmann::json & json) : ref(json["uri"]) , narHash(Hash((std::string) json["narHash"])) { @@ -58,7 +58,7 @@ NonFlakeDep::NonFlakeDep(const nlohmann::json & json) throw Error("lockfile contains mutable flakeref '%s'", ref); } -nlohmann::json NonFlakeDep::toJson() const +nlohmann::json AbstractDep::toJson() const { nlohmann::json json; json["uri"] = ref.to_string(); @@ -66,22 +66,23 @@ nlohmann::json NonFlakeDep::toJson() const return json; } +Path AbstractDep::computeStorePath(Store & store) const +{ + return store.makeFixedOutputPath(true, narHash, "source"); +} + FlakeDep::FlakeDep(const nlohmann::json & json) : FlakeInputs(json) + , AbstractDep(json) , id(json["id"]) - , ref(json["uri"]) - , narHash(Hash((std::string) json["narHash"])) { - if (!ref.isImmutable()) - throw Error("lockfile contains mutable flakeref '%s'", ref); } nlohmann::json FlakeDep::toJson() const { auto json = FlakeInputs::toJson(); + json.update(AbstractDep::toJson()); json["id"] = id; - json["uri"] = ref.to_string(); - json["narHash"] = narHash.to_string(SRI); return json; } diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 692fa744d..933bc2593 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -1,3 +1,5 @@ +#pragma once + #include "types.hh" #include "flakeref.hh" @@ -8,6 +10,7 @@ namespace nix { struct Value; class EvalState; +class Store; namespace flake { @@ -37,22 +40,29 @@ enum HandleLockFile : unsigned int , UseNewLockFile // `RecreateLockFile` without writing to file }; -struct NonFlakeDep +struct AbstractDep { FlakeRef ref; Hash narHash; - NonFlakeDep(const FlakeRef & flakeRef, const Hash & narHash) + AbstractDep(const FlakeRef & flakeRef, const Hash & narHash) : ref(flakeRef), narHash(narHash) {}; - NonFlakeDep(const nlohmann::json & json); + AbstractDep(const nlohmann::json & json); + + nlohmann::json toJson() const; + + Path computeStorePath(Store & store) const; +}; + +struct NonFlakeDep : AbstractDep +{ + using AbstractDep::AbstractDep; bool operator ==(const NonFlakeDep & other) const { return ref == other.ref && narHash == other.narHash; } - - nlohmann::json toJson() const; }; struct FlakeDep; @@ -68,14 +78,12 @@ struct FlakeInputs nlohmann::json toJson() const; }; -struct FlakeDep : FlakeInputs +struct FlakeDep : FlakeInputs, AbstractDep { FlakeId id; - FlakeRef ref; - Hash narHash; FlakeDep(const FlakeId & id, const FlakeRef & flakeRef, const Hash & narHash) - : id(id), ref(flakeRef), narHash(narHash) {}; + : AbstractDep(flakeRef, narHash), id(id) {}; FlakeDep(const 
nlohmann::json & json); diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 86b4a9b93..c44a37f1e 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -190,22 +190,28 @@ void makeFlakeClosureGCRoot(Store & store, const FlakeRef & origFlakeRef, const flake::ResolvedFlake & resFlake) { -#if 0 if (std::get_if(&origFlakeRef.data)) return; /* Get the store paths of all non-local flakes. */ PathSet closure; - std::queue> queue; - queue.push(resFlake); + assert(store.isValidPath(resFlake.flake.sourceInfo.storePath)); + closure.insert(resFlake.flake.sourceInfo.storePath); + + std::queue> queue; + queue.push(resFlake.lockFile); while (!queue.empty()) { - const flake::ResolvedFlake & flake = queue.front(); + const flake::FlakeInputs & flake = queue.front(); queue.pop(); - if (!std::get_if(&flake.flake.sourceInfo.resolvedRef.data)) - closure.insert(flake.flake.sourceInfo.storePath); - for (const auto & dep : flake.flakeDeps) + /* Note: due to lazy fetching, these paths might not exist + yet. */ + for (auto & dep : flake.flakeDeps) { + closure.insert(dep.second.computeStorePath(store)); queue.push(dep.second); + } + for (auto & dep : flake.nonFlakeDeps) + closure.insert(dep.second.computeStorePath(store)); } if (closure.empty()) return; @@ -225,7 +231,6 @@ void makeFlakeClosureGCRoot(Store & store, debug("writing GC root '%s' for flake closure of '%s'", symlink, origFlakeRef); replaceSymlink(closurePath, symlink); store.addIndirectRoot(symlink); -#endif } struct InstallableFlake : InstallableValue diff --git a/tests/flakes.sh b/tests/flakes.sh index 29845e5ed..f44b9509f 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -204,6 +204,7 @@ nix flake list --flake-registry file://$registry --tarball-ttl 0 | grep -q flake mv $registry.tmp $registry # Test whether flakes are registered as GC roots for offline use. +# FIXME: use tarballs rather than git. rm -rf $TEST_HOME/.cache nix build -o $TEST_ROOT/result --flake-registry file://$registry file://$flake2Dir:bar mv $flake1Dir $flake1Dir.tmp From 9e99b5205c4035753106448241ae44e7447f019c Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 4 Jun 2019 20:01:21 +0200 Subject: [PATCH 191/634] Move LockFile and related types to a separate file --- src/libexpr/primops/flake.cc | 103 +----------------------------- src/libexpr/primops/flake.hh | 85 +----------------------- src/libexpr/primops/lockfile.cc | 104 ++++++++++++++++++++++++++++++ src/libexpr/primops/lockfile.hh | 110 ++++++++++++++++++++++++++++++++ 4 files changed, 218 insertions(+), 184 deletions(-) create mode 100644 src/libexpr/primops/lockfile.cc create mode 100644 src/libexpr/primops/lockfile.hh diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index d0405a377..b38971c36 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -1,4 +1,5 @@ #include "flake.hh" +#include "lockfile.hh" #include "primops.hh" #include "eval-inline.hh" #include "fetchGit.hh" @@ -50,104 +51,6 @@ void writeRegistry(const FlakeRegistry & registry, const Path & path) writeFile(path, json.dump(4)); // The '4' is the number of spaces used in the indentation in the json file. 
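    // (Illustrative sketch of the file written above; the "nixpkgs" entry and
    // its URI are made-up examples, and the version is whatever was assigned
    // to json["version"]:
    //
    //     {
    //         "flakes": {
    //             "nixpkgs": {
    //                 "uri": "github:edolstra/nixpkgs/<rev>"
    //             }
    //         },
    //         "version": ...
    //     }
    // )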
} -AbstractDep::AbstractDep(const nlohmann::json & json) - : ref(json["uri"]) - , narHash(Hash((std::string) json["narHash"])) -{ - if (!ref.isImmutable()) - throw Error("lockfile contains mutable flakeref '%s'", ref); -} - -nlohmann::json AbstractDep::toJson() const -{ - nlohmann::json json; - json["uri"] = ref.to_string(); - json["narHash"] = narHash.to_string(SRI); - return json; -} - -Path AbstractDep::computeStorePath(Store & store) const -{ - return store.makeFixedOutputPath(true, narHash, "source"); -} - -FlakeDep::FlakeDep(const nlohmann::json & json) - : FlakeInputs(json) - , AbstractDep(json) - , id(json["id"]) -{ -} - -nlohmann::json FlakeDep::toJson() const -{ - auto json = FlakeInputs::toJson(); - json.update(AbstractDep::toJson()); - json["id"] = id; - return json; -} - -FlakeInputs::FlakeInputs(const nlohmann::json & json) -{ - auto nonFlakeInputs = json["nonFlakeInputs"]; - for (auto i = nonFlakeInputs.begin(); i != nonFlakeInputs.end(); ++i) - nonFlakeDeps.insert_or_assign(i.key(), NonFlakeDep(*i)); - - auto inputs = json["inputs"]; - for (auto i = inputs.begin(); i != inputs.end(); ++i) - flakeDeps.insert_or_assign(i.key(), FlakeDep(*i)); -} - -nlohmann::json FlakeInputs::toJson() const -{ - nlohmann::json json; - { - auto j = nlohmann::json::object(); - for (auto & i : nonFlakeDeps) - j[i.first] = i.second.toJson(); - json["nonFlakeInputs"] = std::move(j); - } - { - auto j = nlohmann::json::object(); - for (auto & i : flakeDeps) - j[i.first.to_string()] = i.second.toJson(); - json["inputs"] = std::move(j); - } - return json; -} - -nlohmann::json LockFile::toJson() const -{ - auto json = FlakeInputs::toJson(); - json["version"] = 2; - return json; -} - -LockFile readLockFile(const Path & path) -{ - if (pathExists(path)) { - auto json = nlohmann::json::parse(readFile(path)); - - auto version = json.value("version", 0); - if (version != 2) - throw Error("lock file '%s' has unsupported version %d", path, version); - - return LockFile(json); - } else - return LockFile(); -} - -std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile) -{ - stream << lockFile.toJson().dump(4); // '4' = indentation in json file - return stream; -} - -void writeLockFile(const LockFile & lockFile, const Path & path) -{ - createDirs(dirOf(path)); - writeFile(path, fmt("%s\n", lockFile)); -} - Path getUserRegistryPath() { return getHome() + "/.config/nix/registry.json"; @@ -471,7 +374,7 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLoc if (!recreateLockFile(handleLockFile)) { // If recreateLockFile, start with an empty lockfile // FIXME: symlink attack - oldLockFile = readLockFile( + oldLockFile = LockFile::read( state.store->toRealPath(flake.sourceInfo.storePath) + "/" + flake.sourceInfo.resolvedRef.subdir + "/flake.lock"); } @@ -483,7 +386,7 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLoc if (!(lockFile == oldLockFile)) { if (allowedToWrite(handleLockFile)) { if (auto refData = std::get_if(&topRef.data)) { - writeLockFile(lockFile, refData->path + (topRef.subdir == "" ? "" : "/" + topRef.subdir) + "/flake.lock"); + lockFile.write(refData->path + (topRef.subdir == "" ? "" : "/" + topRef.subdir) + "/flake.lock"); // Hack: Make sure that flake.lock is visible to Git, so it ends up in the Nix store. 
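    // (Presumably the flake source reaches the store via fetchGit, which only
    // copies files tracked by Git, so a flake.lock that was never 'git add'ed
    // would be missing from the store copy seen by dependent flakes.)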
runProgram("git", true, { "-C", refData->path, "add", diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index 933bc2593..bbf35da02 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -2,15 +2,12 @@ #include "types.hh" #include "flakeref.hh" - -#include -#include +#include "lockfile.hh" namespace nix { struct Value; class EvalState; -class Store; namespace flake { @@ -40,86 +37,6 @@ enum HandleLockFile : unsigned int , UseNewLockFile // `RecreateLockFile` without writing to file }; -struct AbstractDep -{ - FlakeRef ref; - Hash narHash; - - AbstractDep(const FlakeRef & flakeRef, const Hash & narHash) - : ref(flakeRef), narHash(narHash) {}; - - AbstractDep(const nlohmann::json & json); - - nlohmann::json toJson() const; - - Path computeStorePath(Store & store) const; -}; - -struct NonFlakeDep : AbstractDep -{ - using AbstractDep::AbstractDep; - - bool operator ==(const NonFlakeDep & other) const - { - return ref == other.ref && narHash == other.narHash; - } -}; - -struct FlakeDep; - -struct FlakeInputs -{ - std::map flakeDeps; - std::map nonFlakeDeps; - - FlakeInputs() {}; - FlakeInputs(const nlohmann::json & json); - - nlohmann::json toJson() const; -}; - -struct FlakeDep : FlakeInputs, AbstractDep -{ - FlakeId id; - - FlakeDep(const FlakeId & id, const FlakeRef & flakeRef, const Hash & narHash) - : AbstractDep(flakeRef, narHash), id(id) {}; - - FlakeDep(const nlohmann::json & json); - - bool operator ==(const FlakeDep & other) const - { - return - id == other.id - && ref == other.ref - && narHash == other.narHash - && flakeDeps == other.flakeDeps - && nonFlakeDeps == other.nonFlakeDeps; - } - - nlohmann::json toJson() const; -}; - -struct LockFile : FlakeInputs -{ - bool operator ==(const LockFile & other) const - { - return - flakeDeps == other.flakeDeps - && nonFlakeDeps == other.nonFlakeDeps; - } - - LockFile() {} - LockFile(const nlohmann::json & json) : FlakeInputs(json) {} - LockFile(FlakeDep && dep) - { - flakeDeps = std::move(dep.flakeDeps); - nonFlakeDeps = std::move(dep.nonFlakeDeps); - } - - nlohmann::json toJson() const; -}; - struct SourceInfo { // Immutable flakeref that this source tree was obtained from. 
diff --git a/src/libexpr/primops/lockfile.cc b/src/libexpr/primops/lockfile.cc new file mode 100644 index 000000000..ea0a93510 --- /dev/null +++ b/src/libexpr/primops/lockfile.cc @@ -0,0 +1,104 @@ +#include "lockfile.hh" +#include "store-api.hh" + +namespace nix::flake { + +AbstractDep::AbstractDep(const nlohmann::json & json) + : ref(json["uri"]) + , narHash(Hash((std::string) json["narHash"])) +{ + if (!ref.isImmutable()) + throw Error("lockfile contains mutable flakeref '%s'", ref); +} + +nlohmann::json AbstractDep::toJson() const +{ + nlohmann::json json; + json["uri"] = ref.to_string(); + json["narHash"] = narHash.to_string(SRI); + return json; +} + +Path AbstractDep::computeStorePath(Store & store) const +{ + return store.makeFixedOutputPath(true, narHash, "source"); +} + +FlakeDep::FlakeDep(const nlohmann::json & json) + : FlakeInputs(json) + , AbstractDep(json) + , id(json["id"]) +{ +} + +nlohmann::json FlakeDep::toJson() const +{ + auto json = FlakeInputs::toJson(); + json.update(AbstractDep::toJson()); + json["id"] = id; + return json; +} + +FlakeInputs::FlakeInputs(const nlohmann::json & json) +{ + auto nonFlakeInputs = json["nonFlakeInputs"]; + for (auto i = nonFlakeInputs.begin(); i != nonFlakeInputs.end(); ++i) + nonFlakeDeps.insert_or_assign(i.key(), NonFlakeDep(*i)); + + auto inputs = json["inputs"]; + for (auto i = inputs.begin(); i != inputs.end(); ++i) + flakeDeps.insert_or_assign(i.key(), FlakeDep(*i)); +} + +nlohmann::json FlakeInputs::toJson() const +{ + nlohmann::json json; + { + auto j = nlohmann::json::object(); + for (auto & i : nonFlakeDeps) + j[i.first] = i.second.toJson(); + json["nonFlakeInputs"] = std::move(j); + } + { + auto j = nlohmann::json::object(); + for (auto & i : flakeDeps) + j[i.first.to_string()] = i.second.toJson(); + json["inputs"] = std::move(j); + } + return json; +} + +nlohmann::json LockFile::toJson() const +{ + auto json = FlakeInputs::toJson(); + json["version"] = 2; + return json; +} + +LockFile LockFile::read(const Path & path) +{ + if (pathExists(path)) { + auto json = nlohmann::json::parse(readFile(path)); + + auto version = json.value("version", 0); + if (version != 2) + throw Error("lock file '%s' has unsupported version %d", path, version); + + return LockFile(json); + } else + return LockFile(); +} + +std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile) +{ + stream << lockFile.toJson().dump(4); // '4' = indentation in json file + return stream; +} + +void LockFile::write(const Path & path) const +{ + createDirs(dirOf(path)); + writeFile(path, fmt("%s\n", *this)); +} + +} diff --git a/src/libexpr/primops/lockfile.hh b/src/libexpr/primops/lockfile.hh new file mode 100644 index 000000000..fee15c1da --- /dev/null +++ b/src/libexpr/primops/lockfile.hh @@ -0,0 +1,110 @@ +#pragma once + +#include "flakeref.hh" + +#include + +namespace nix { +class Store; +} + +namespace nix::flake { + +/* Common lock file information about a flake input, namely the + immutable ref and the NAR hash. */ +struct AbstractDep +{ + FlakeRef ref; + Hash narHash; + + AbstractDep(const FlakeRef & flakeRef, const Hash & narHash) + : ref(flakeRef), narHash(narHash) + { + assert(ref.isImmutable()); + }; + + AbstractDep(const nlohmann::json & json); + + nlohmann::json toJson() const; + + Path computeStorePath(Store & store) const; +}; + +/* Lock file information about a non-flake input. 
*/ +struct NonFlakeDep : AbstractDep +{ + using AbstractDep::AbstractDep; + + bool operator ==(const NonFlakeDep & other) const + { + return ref == other.ref && narHash == other.narHash; + } +}; + +struct FlakeDep; + +/* Lock file information about the dependencies of a flake. */ +struct FlakeInputs +{ + std::map flakeDeps; + std::map nonFlakeDeps; + + FlakeInputs() {}; + FlakeInputs(const nlohmann::json & json); + + nlohmann::json toJson() const; +}; + +/* Lock file information about a flake input. */ +struct FlakeDep : FlakeInputs, AbstractDep +{ + FlakeId id; + + FlakeDep(const FlakeId & id, const FlakeRef & flakeRef, const Hash & narHash) + : AbstractDep(flakeRef, narHash), id(id) {}; + + FlakeDep(const nlohmann::json & json); + + bool operator ==(const FlakeDep & other) const + { + return + id == other.id + && ref == other.ref + && narHash == other.narHash + && flakeDeps == other.flakeDeps + && nonFlakeDeps == other.nonFlakeDeps; + } + + nlohmann::json toJson() const; +}; + +/* An entire lock file. Note that this cannot be a FlakeDep for the + top-level flake, because then the lock file would need to contain + the hash of the top-level flake, but committing the lock file + would invalidate that hash. */ +struct LockFile : FlakeInputs +{ + bool operator ==(const LockFile & other) const + { + return + flakeDeps == other.flakeDeps + && nonFlakeDeps == other.nonFlakeDeps; + } + + LockFile() {} + LockFile(const nlohmann::json & json) : FlakeInputs(json) {} + LockFile(FlakeDep && dep) + { + flakeDeps = std::move(dep.flakeDeps); + nonFlakeDeps = std::move(dep.nonFlakeDeps); + } + + nlohmann::json toJson() const; + + static LockFile read(const Path & path); + + void write(const Path & path) const; +}; + +} + From 5fe7be2409966d673d59d049c3fc6e7710d03b53 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 4 Jun 2019 20:08:13 +0200 Subject: [PATCH 192/634] Rename dep -> input Also use nlohmann::json range-based for. 
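A minimal standalone illustration (not part of the patch) of the two iteration styles over an nlohmann::json object: the explicit-iterator form being replaced and the items() range form used below. The function and the printing are invented for the example:

    #include <nlohmann/json.hpp>
    #include <iostream>

    void dumpInputs(const nlohmann::json & inputs)
    {
        // Before: explicit iterators, reading the key off the iterator.
        for (auto i = inputs.begin(); i != inputs.end(); ++i)
            std::cout << i.key() << " -> " << i->dump() << "\n";

        // After: range-based for over items(), as in lockfile.cc below.
        for (auto & i : inputs.items())
            std::cout << i.key() << " -> " << i.value().dump() << "\n";
    }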
--- src/libexpr/primops/flake.cc | 34 ++++++++++++------------- src/libexpr/primops/lockfile.cc | 28 ++++++++++----------- src/libexpr/primops/lockfile.hh | 44 ++++++++++++++++----------------- src/nix/installables.cc | 4 +-- 4 files changed, 54 insertions(+), 56 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index b38971c36..6919bc66b 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -316,7 +316,7 @@ bool allowedToUseRegistries(HandleLockFile handle, bool isTopRef) else assert(false); } -static std::pair updateLocks( +static std::pair updateLocks( EvalState & state, const FlakeRef & flakeRef, HandleLockFile handleLockFile, @@ -325,7 +325,7 @@ static std::pair updateLocks( { auto flake = getFlake(state, flakeRef, allowedToUseRegistries(handleLockFile, topRef)); - FlakeDep newEntry( + FlakeInput newEntry( flake.id, flake.sourceInfo.resolvedRef, flake.sourceInfo.narHash); @@ -333,28 +333,28 @@ static std::pair updateLocks( for (auto & input : flake.nonFlakeInputs) { auto & id = input.first; auto & ref = input.second; - auto i = oldEntry.nonFlakeDeps.find(id); - if (i != oldEntry.nonFlakeDeps.end()) { - newEntry.nonFlakeDeps.insert_or_assign(i->first, i->second); + auto i = oldEntry.nonFlakeInputs.find(id); + if (i != oldEntry.nonFlakeInputs.end()) { + newEntry.nonFlakeInputs.insert_or_assign(i->first, i->second); } else { if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries) throw Error("cannot update non-flake dependency '%s' in pure mode", id); auto nonFlake = getNonFlake(state, ref, id, allowedToUseRegistries(handleLockFile, false)); - newEntry.nonFlakeDeps.insert_or_assign(id, - NonFlakeDep( + newEntry.nonFlakeInputs.insert_or_assign(id, + NonFlakeInput( nonFlake.sourceInfo.resolvedRef, nonFlake.sourceInfo.narHash)); } } for (auto & inputRef : flake.inputs) { - auto i = oldEntry.flakeDeps.find(inputRef); - if (i != oldEntry.flakeDeps.end()) { - newEntry.flakeDeps.insert_or_assign(inputRef, i->second); + auto i = oldEntry.flakeInputs.find(inputRef); + if (i != oldEntry.flakeInputs.end()) { + newEntry.flakeInputs.insert_or_assign(inputRef, i->second); } else { if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries) throw Error("cannot update flake dependency '%s' in pure mode", inputRef); - newEntry.flakeDeps.insert_or_assign(inputRef, + newEntry.flakeInputs.insert_or_assign(inputRef, updateLocks(state, inputRef, handleLockFile, {}, false).second); } } @@ -434,7 +434,7 @@ static void emitSourceInfoAttrs(EvalState & state, const SourceInfo & sourceInfo it doesn't appear in 'builtins'. */ static void prim_callFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) { - auto lazyFlake = (FlakeDep *) args[0]->attrs; + auto lazyFlake = (FlakeInput *) args[0]->attrs; auto flake = getFlake(state, lazyFlake->ref, false); callFlake(state, flake, *lazyFlake, v); } @@ -448,10 +448,10 @@ void callFlake(EvalState & state, // ...}'. This attrset is passed lazily as an argument to 'outputs'. 
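    // (Illustrative sketch of the shape, as seen from Nix, with made-up names:
    //    { nixpkgs = <outputs of that input, computed lazily via callFlake>;
    //      somedep = <non-flake source>;
    //      description = "..."; self = <this attrset>; ... }
    //  i.e. one attribute per flake input keyed by its id, one per non-flake
    //  input keyed by its name, plus 'description' and 'self'.)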
state.mkAttrs(v, - inputs.flakeDeps.size() + - inputs.nonFlakeDeps.size() + 8); + inputs.flakeInputs.size() + + inputs.nonFlakeInputs.size() + 8); - for (auto & dep : inputs.flakeDeps) { + for (auto & dep : inputs.flakeInputs) { auto vFlake = state.allocAttr(v, dep.second.id); auto vPrimOp = state.allocValue(); static auto primOp = new PrimOp(prim_callFlake, 1, state.symbols.create("callFlake")); @@ -460,11 +460,11 @@ void callFlake(EvalState & state, auto vArg = state.allocValue(); vArg->type = tNull; // FIXME: leak - vArg->attrs = (Bindings *) new FlakeDep(dep.second); // evil! also inefficient + vArg->attrs = (Bindings *) new FlakeInput(dep.second); // evil! also inefficient mkApp(*vFlake, *vPrimOp, *vArg); } - for (auto & dep : inputs.nonFlakeDeps) { + for (auto & dep : inputs.nonFlakeInputs) { auto vNonFlake = state.allocAttr(v, dep.first); state.mkAttrs(*vNonFlake, 8); diff --git a/src/libexpr/primops/lockfile.cc b/src/libexpr/primops/lockfile.cc index ea0a93510..97c748c66 100644 --- a/src/libexpr/primops/lockfile.cc +++ b/src/libexpr/primops/lockfile.cc @@ -3,7 +3,7 @@ namespace nix::flake { -AbstractDep::AbstractDep(const nlohmann::json & json) +AbstractInput::AbstractInput(const nlohmann::json & json) : ref(json["uri"]) , narHash(Hash((std::string) json["narHash"])) { @@ -11,7 +11,7 @@ AbstractDep::AbstractDep(const nlohmann::json & json) throw Error("lockfile contains mutable flakeref '%s'", ref); } -nlohmann::json AbstractDep::toJson() const +nlohmann::json AbstractInput::toJson() const { nlohmann::json json; json["uri"] = ref.to_string(); @@ -19,35 +19,33 @@ nlohmann::json AbstractDep::toJson() const return json; } -Path AbstractDep::computeStorePath(Store & store) const +Path AbstractInput::computeStorePath(Store & store) const { return store.makeFixedOutputPath(true, narHash, "source"); } -FlakeDep::FlakeDep(const nlohmann::json & json) +FlakeInput::FlakeInput(const nlohmann::json & json) : FlakeInputs(json) - , AbstractDep(json) + , AbstractInput(json) , id(json["id"]) { } -nlohmann::json FlakeDep::toJson() const +nlohmann::json FlakeInput::toJson() const { auto json = FlakeInputs::toJson(); - json.update(AbstractDep::toJson()); + json.update(AbstractInput::toJson()); json["id"] = id; return json; } FlakeInputs::FlakeInputs(const nlohmann::json & json) { - auto nonFlakeInputs = json["nonFlakeInputs"]; - for (auto i = nonFlakeInputs.begin(); i != nonFlakeInputs.end(); ++i) - nonFlakeDeps.insert_or_assign(i.key(), NonFlakeDep(*i)); + for (auto & i : json["nonFlakeInputs"].items()) + nonFlakeInputs.insert_or_assign(i.key(), NonFlakeInput(i.value())); - auto inputs = json["inputs"]; - for (auto i = inputs.begin(); i != inputs.end(); ++i) - flakeDeps.insert_or_assign(i.key(), FlakeDep(*i)); + for (auto & i : json["inputs"].items()) + flakeInputs.insert_or_assign(i.key(), FlakeInput(i.value())); } nlohmann::json FlakeInputs::toJson() const @@ -55,13 +53,13 @@ nlohmann::json FlakeInputs::toJson() const nlohmann::json json; { auto j = nlohmann::json::object(); - for (auto & i : nonFlakeDeps) + for (auto & i : nonFlakeInputs) j[i.first] = i.second.toJson(); json["nonFlakeInputs"] = std::move(j); } { auto j = nlohmann::json::object(); - for (auto & i : flakeDeps) + for (auto & i : flakeInputs) j[i.first.to_string()] = i.second.toJson(); json["inputs"] = std::move(j); } diff --git a/src/libexpr/primops/lockfile.hh b/src/libexpr/primops/lockfile.hh index fee15c1da..f2e598528 100644 --- a/src/libexpr/primops/lockfile.hh +++ b/src/libexpr/primops/lockfile.hh @@ -12,18 +12,18 @@ namespace 
nix::flake { /* Common lock file information about a flake input, namely the immutable ref and the NAR hash. */ -struct AbstractDep +struct AbstractInput { FlakeRef ref; Hash narHash; - AbstractDep(const FlakeRef & flakeRef, const Hash & narHash) + AbstractInput(const FlakeRef & flakeRef, const Hash & narHash) : ref(flakeRef), narHash(narHash) { assert(ref.isImmutable()); }; - AbstractDep(const nlohmann::json & json); + AbstractInput(const nlohmann::json & json); nlohmann::json toJson() const; @@ -31,23 +31,23 @@ struct AbstractDep }; /* Lock file information about a non-flake input. */ -struct NonFlakeDep : AbstractDep +struct NonFlakeInput : AbstractInput { - using AbstractDep::AbstractDep; + using AbstractInput::AbstractInput; - bool operator ==(const NonFlakeDep & other) const + bool operator ==(const NonFlakeInput & other) const { return ref == other.ref && narHash == other.narHash; } }; -struct FlakeDep; +struct FlakeInput; /* Lock file information about the dependencies of a flake. */ struct FlakeInputs { - std::map flakeDeps; - std::map nonFlakeDeps; + std::map flakeInputs; + std::map nonFlakeInputs; FlakeInputs() {}; FlakeInputs(const nlohmann::json & json); @@ -56,29 +56,29 @@ struct FlakeInputs }; /* Lock file information about a flake input. */ -struct FlakeDep : FlakeInputs, AbstractDep +struct FlakeInput : FlakeInputs, AbstractInput { FlakeId id; - FlakeDep(const FlakeId & id, const FlakeRef & flakeRef, const Hash & narHash) - : AbstractDep(flakeRef, narHash), id(id) {}; + FlakeInput(const FlakeId & id, const FlakeRef & flakeRef, const Hash & narHash) + : AbstractInput(flakeRef, narHash), id(id) {}; - FlakeDep(const nlohmann::json & json); + FlakeInput(const nlohmann::json & json); - bool operator ==(const FlakeDep & other) const + bool operator ==(const FlakeInput & other) const { return id == other.id && ref == other.ref && narHash == other.narHash - && flakeDeps == other.flakeDeps - && nonFlakeDeps == other.nonFlakeDeps; + && flakeInputs == other.flakeInputs + && nonFlakeInputs == other.nonFlakeInputs; } nlohmann::json toJson() const; }; -/* An entire lock file. Note that this cannot be a FlakeDep for the +/* An entire lock file. Note that this cannot be a FlakeInput for the top-level flake, because then the lock file would need to contain the hash of the top-level flake, but committing the lock file would invalidate that hash. */ @@ -87,16 +87,16 @@ struct LockFile : FlakeInputs bool operator ==(const LockFile & other) const { return - flakeDeps == other.flakeDeps - && nonFlakeDeps == other.nonFlakeDeps; + flakeInputs == other.flakeInputs + && nonFlakeInputs == other.nonFlakeInputs; } LockFile() {} LockFile(const nlohmann::json & json) : FlakeInputs(json) {} - LockFile(FlakeDep && dep) + LockFile(FlakeInput && dep) { - flakeDeps = std::move(dep.flakeDeps); - nonFlakeDeps = std::move(dep.nonFlakeDeps); + flakeInputs = std::move(dep.flakeInputs); + nonFlakeInputs = std::move(dep.nonFlakeInputs); } nlohmann::json toJson() const; diff --git a/src/nix/installables.cc b/src/nix/installables.cc index c44a37f1e..e7549b57c 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -206,11 +206,11 @@ void makeFlakeClosureGCRoot(Store & store, queue.pop(); /* Note: due to lazy fetching, these paths might not exist yet. 
*/ - for (auto & dep : flake.flakeDeps) { + for (auto & dep : flake.flakeInputs) { closure.insert(dep.second.computeStorePath(store)); queue.push(dep.second); } - for (auto & dep : flake.nonFlakeDeps) + for (auto & dep : flake.nonFlakeInputs) closure.insert(dep.second.computeStorePath(store)); } From 4ec1a9ab40705b651fe4b2ef6186c0a6ae279681 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 4 Jun 2019 20:33:49 +0200 Subject: [PATCH 193/634] Fix test --- tests/flakes.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/flakes.sh b/tests/flakes.sh index f44b9509f..f9f8e5272 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -245,6 +245,8 @@ git -C $flake3Dir commit -m 'Add nonFlakeInputs' # Check whether `nix build` works with a lockfile which is missing a nonFlakeInputs nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir:sth +git -C $flake3Dir commit -m 'Update nonFlakeInputs' + # Check whether flake input fetching is lazy: flake3:sth does not # depend on flake2, so this shouldn't fail. rm -rf $TEST_HOME/.cache From 45b5c606ac44550de14562df4fa99773a81a1015 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 4 Jun 2019 20:34:08 +0200 Subject: [PATCH 194/634] Don't register invalid paths as GC roots Unfortunately this doesn't work. Maybe we should keep separate roots for each path. --- src/nix/installables.cc | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/nix/installables.cc b/src/nix/installables.cc index e7549b57c..ca88ec0da 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -207,11 +207,16 @@ void makeFlakeClosureGCRoot(Store & store, /* Note: due to lazy fetching, these paths might not exist yet. */ for (auto & dep : flake.flakeInputs) { - closure.insert(dep.second.computeStorePath(store)); + auto path = dep.second.computeStorePath(store); + if (store.isValidPath(path)) + closure.insert(path); queue.push(dep.second); } - for (auto & dep : flake.nonFlakeInputs) - closure.insert(dep.second.computeStorePath(store)); + for (auto & dep : flake.nonFlakeInputs) { + auto path = dep.second.computeStorePath(store); + if (store.isValidPath(path)) + closure.insert(path); + } } if (closure.empty()) return; From 1c5067b9a7e1f561bf9e9e84642c495a50ca44a7 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 4 Jun 2019 20:34:44 +0200 Subject: [PATCH 195/634] Check hash --- src/libexpr/primops/flake.cc | 6 ++++-- src/libexpr/primops/lockfile.hh | 2 ++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 6919bc66b..52d8df69e 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -409,8 +409,6 @@ static void emitSourceInfoAttrs(EvalState & state, const SourceInfo & sourceInfo { auto & path = sourceInfo.storePath; assert(state.store->isValidPath(path)); - // FIXME: turn into fetchGit etc. - // FIXME: check narHash. 
mkString(*state.allocAttr(vAttrs, state.sOutPath), path, {path}); if (sourceInfo.resolvedRef.rev) { @@ -436,6 +434,10 @@ static void prim_callFlake(EvalState & state, const Pos & pos, Value * * args, V { auto lazyFlake = (FlakeInput *) args[0]->attrs; auto flake = getFlake(state, lazyFlake->ref, false); + + if (flake.sourceInfo.narHash != lazyFlake->narHash) + throw Error("the content hash of flake '%s' doesn't match the hash recorded in the referring lockfile", flake.sourceInfo.resolvedRef); + callFlake(state, flake, *lazyFlake, v); } diff --git a/src/libexpr/primops/lockfile.hh b/src/libexpr/primops/lockfile.hh index f2e598528..b76124190 100644 --- a/src/libexpr/primops/lockfile.hh +++ b/src/libexpr/primops/lockfile.hh @@ -106,5 +106,7 @@ struct LockFile : FlakeInputs void write(const Path & path) const; }; +std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile); + } From 1e53a07712fba830eb3967cc16894992d5a33922 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 4 Jun 2019 20:56:13 +0200 Subject: [PATCH 196/634] Make non-flake inputs lazy Also add a proper test for non-flake inputs. --- src/libexpr/primops/flake.cc | 44 ++++++++++++++++++++++++------------ src/libexpr/primops/flake.hh | 1 - src/nix/flake.cc | 2 +- tests/flakes.sh | 25 +++++++++++++++++--- 4 files changed, 53 insertions(+), 19 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 52d8df69e..f99738db5 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -276,7 +276,7 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe } // Get the `NonFlake` corresponding to a `FlakeRef`. -NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeAlias alias, bool impureIsAllowed = false) +NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowed = false) { auto sourceInfo = fetchFlake(state, flakeRef, impureIsAllowed); debug("got non-flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string()); @@ -290,8 +290,6 @@ NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeAlias al if (state.allowedPaths) state.allowedPaths->insert(nonFlake.sourceInfo.storePath); - nonFlake.alias = alias; - return nonFlake; } @@ -339,7 +337,7 @@ static std::pair updateLocks( } else { if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries) throw Error("cannot update non-flake dependency '%s' in pure mode", id); - auto nonFlake = getNonFlake(state, ref, id, allowedToUseRegistries(handleLockFile, false)); + auto nonFlake = getNonFlake(state, ref, allowedToUseRegistries(handleLockFile, false)); newEntry.nonFlakeInputs.insert_or_assign(id, NonFlakeInput( nonFlake.sourceInfo.resolvedRef, @@ -441,6 +439,25 @@ static void prim_callFlake(EvalState & state, const Pos & pos, Value * * args, V callFlake(state, flake, *lazyFlake, v); } +static void prim_callNonFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + auto lazyNonFlake = (NonFlakeInput *) args[0]->attrs; + + auto nonFlake = getNonFlake(state, lazyNonFlake->ref); + + if (nonFlake.sourceInfo.narHash != lazyNonFlake->narHash) + throw Error("the content hash of repository '%s' doesn't match the hash recorded in the referring lockfile", nonFlake.sourceInfo.resolvedRef); + + state.mkAttrs(v, 8); + + assert(state.store->isValidPath(nonFlake.sourceInfo.storePath)); + + mkString(*state.allocAttr(v, state.sOutPath), + nonFlake.sourceInfo.storePath, 
{nonFlake.sourceInfo.storePath}); + + emitSourceInfoAttrs(state, nonFlake.sourceInfo, v); +} + void callFlake(EvalState & state, const Flake & flake, const FlakeInputs & inputs, @@ -468,16 +485,15 @@ void callFlake(EvalState & state, for (auto & dep : inputs.nonFlakeInputs) { auto vNonFlake = state.allocAttr(v, dep.first); - state.mkAttrs(*vNonFlake, 8); - - auto nonFlake = getNonFlake(state, dep.second.ref, dep.first); - - assert(state.store->isValidPath(nonFlake.sourceInfo.storePath)); - - mkString(*state.allocAttr(*vNonFlake, state.sOutPath), - nonFlake.sourceInfo.storePath, {nonFlake.sourceInfo.storePath}); - - emitSourceInfoAttrs(state, nonFlake.sourceInfo, *vNonFlake); + auto vPrimOp = state.allocValue(); + static auto primOp = new PrimOp(prim_callNonFlake, 1, state.symbols.create("callNonFlake")); + vPrimOp->type = tPrimOp; + vPrimOp->primOp = primOp; + auto vArg = state.allocValue(); + vArg->type = tNull; + // FIXME: leak + vArg->attrs = (Bindings *) new NonFlakeInput(dep.second); // evil! also inefficient + mkApp(*vNonFlake, *vPrimOp, *vArg); } mkString(*state.allocAttr(v, state.sDescription), flake.description); diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh index bbf35da02..b8d0da252 100644 --- a/src/libexpr/primops/flake.hh +++ b/src/libexpr/primops/flake.hh @@ -75,7 +75,6 @@ struct Flake struct NonFlake { - FlakeAlias alias; FlakeRef originalRef; SourceInfo sourceInfo; NonFlake(const FlakeRef & origRef, const SourceInfo & sourceInfo) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index d229c7512..653154aaa 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -122,6 +122,7 @@ static nlohmann::json flakeToJson(const Flake & flake) return j; } +#if 0 static void printNonFlakeInfo(const NonFlake & nonFlake) { std::cout << fmt("ID: %s\n", nonFlake.alias); @@ -136,7 +137,6 @@ static nlohmann::json nonFlakeToJson(const NonFlake & nonFlake) return j; } -#if 0 // FIXME: merge info CmdFlakeInfo? struct CmdFlakeDeps : FlakeCommand { diff --git a/tests/flakes.sh b/tests/flakes.sh index f9f8e5272..1cd8259b9 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -83,7 +83,7 @@ git -C $flake3Dir add flake.nix git -C $flake3Dir commit -m 'Initial' cat > $nonFlakeDir/README.md < $flake3Dir/flake.nix < \$out + ''; + }; }; } EOF -git -C $flake3Dir add flake.nix +cp ./config.nix $flake3Dir + +git -C $flake3Dir add flake.nix config.nix git -C $flake3Dir commit -m 'Add nonFlakeInputs' -# Check whether `nix build` works with a lockfile which is missing a nonFlakeInputs +# Check whether `nix build` works with a lockfile which is missing a +# nonFlakeInputs. nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir:sth git -C $flake3Dir commit -m 'Update nonFlakeInputs' +nix build -o $TEST_ROOT/result --flake-registry $registry flake3:fnord +[[ $(cat $TEST_ROOT/result) = FNORD ]] + # Check whether flake input fetching is lazy: flake3:sth does not # depend on flake2, so this shouldn't fail. rm -rf $TEST_HOME/.cache clearStore mv $flake2Dir $flake2Dir.tmp +mv $nonFlakeDir $nonFlakeDir.tmp nix build -o $TEST_ROOT/result --flake-registry $registry flake3:sth (! nix build -o $TEST_ROOT/result --flake-registry $registry flake3:xyzzy) +(! 
nix build -o $TEST_ROOT/result --flake-registry $registry flake3:fnord) mv $flake2Dir.tmp $flake2Dir +mv $nonFlakeDir.tmp $nonFlakeDir +nix build -o $TEST_ROOT/result --flake-registry $registry flake3:xyzzy flake3:fnord From 4d31cf83f288ed2e4a14f72e99b77859de981bb4 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 4 Jun 2019 20:57:18 +0200 Subject: [PATCH 197/634] Update flake.lock --- flake.lock | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/flake.lock b/flake.lock index b85571b61..727d16d73 100644 --- a/flake.lock +++ b/flake.lock @@ -1,10 +1,13 @@ { "inputs": { "nixpkgs": { - "narHash": "sha256-rMiWaLXkhizEEMEeMDutUl0Y/c+VEjfjvMkvBwvuQJU=", - "uri": "github:edolstra/nixpkgs/eeeffd24cd7e407cfaa99e98cfbb8f93bf4cc033" + "id": "nixpkgs", + "inputs": {}, + "narHash": "sha256-eYtxncIMFVmOHaHBtTdPGcs/AnJqKqA6tHCm0UmPYQU=", + "nonFlakeInputs": {}, + "uri": "github:edolstra/nixpkgs/e9d5882bb861dc48f8d46960e7c820efdbe8f9c1" } }, "nonFlakeInputs": {}, - "version": 1 + "version": 2 } From 087530dec40bd5ab23fe2da83fca517bb91d2282 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 4 Jun 2019 21:07:55 +0200 Subject: [PATCH 198/634] Add comments --- src/libexpr/primops/flake.cc | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index f99738db5..000a43764 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -314,6 +314,15 @@ bool allowedToUseRegistries(HandleLockFile handle, bool isTopRef) else assert(false); } +/* Given a flakeref and its subtree of the lockfile, return an updated + subtree of the lockfile. That is, if the 'flake.nix' of the + referenced flake has inputs that don't have a corresponding entry + in the lockfile, they're added to the lockfile; conversely, any + lockfile entries that don't have a corresponding entry in flake.nix + are removed. + + Note that this is lazy: we only recursively fetch inputs that are + not in the lockfile yet. */ static std::pair updateLocks( EvalState & state, const FlakeRef & flakeRef, @@ -360,9 +369,8 @@ static std::pair updateLocks( return {flake, newEntry}; } -/* Given a flake reference, recursively fetch it and its dependencies. - FIXME: this should return a graph of flakes. -*/ +/* Compute an in-memory lockfile for the specified top-level flake, + and optionally write it to file, it the flake is writable. */ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLockFile handleLockFile) { auto flake = getFlake(state, topRef, allowedToUseRegistries(handleLockFile, true)); From ce225615c3ee08b7b63a8488dbf74ff2598d8d74 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 4 Jun 2019 21:10:53 +0200 Subject: [PATCH 199/634] Eliminate duplicate fetching of the top-level flake --- src/libexpr/primops/flake.cc | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 000a43764..734f650f2 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -325,13 +325,11 @@ bool allowedToUseRegistries(HandleLockFile handle, bool isTopRef) not in the lockfile yet. 
*/ static std::pair updateLocks( EvalState & state, - const FlakeRef & flakeRef, + const Flake & flake, HandleLockFile handleLockFile, const FlakeInputs & oldEntry, bool topRef) { - auto flake = getFlake(state, flakeRef, allowedToUseRegistries(handleLockFile, topRef)); - FlakeInput newEntry( flake.id, flake.sourceInfo.resolvedRef, @@ -362,7 +360,9 @@ static std::pair updateLocks( if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries) throw Error("cannot update flake dependency '%s' in pure mode", inputRef); newEntry.flakeInputs.insert_or_assign(inputRef, - updateLocks(state, inputRef, handleLockFile, {}, false).second); + updateLocks(state, + getFlake(state, inputRef, allowedToUseRegistries(handleLockFile, false)), + handleLockFile, {}, false).second); } } @@ -385,9 +385,8 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLoc + "/" + flake.sourceInfo.resolvedRef.subdir + "/flake.lock"); } - // FIXME: get rid of duplicate getFlake call LockFile lockFile(updateLocks( - state, topRef, handleLockFile, oldLockFile, true).second); + state, flake, handleLockFile, oldLockFile, true).second); if (!(lockFile == oldLockFile)) { if (allowedToWrite(handleLockFile)) { From 1b057929885fd3f339d4c85b44ad9f10fef7d8a9 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 4 Jun 2019 22:35:43 +0200 Subject: [PATCH 200/634] Shorter syntax for referencing flake outputs Fixes #2819. --- src/libexpr/primops/flake.cc | 16 +++++++++++++--- tests/flakes.sh | 12 ++++++------ 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc index 734f650f2..793d6da35 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/primops/flake.cc @@ -468,10 +468,12 @@ static void prim_callNonFlake(EvalState & state, const Pos & pos, Value * * args void callFlake(EvalState & state, const Flake & flake, const FlakeInputs & inputs, - Value & v) + Value & vRes) { - // Construct the resulting attrset '{description, outputs, - // ...}'. This attrset is passed lazily as an argument to 'outputs'. + // Construct the resulting attrset '{outputs, ...}'. This attrset + // is passed lazily as an argument to the 'outputs' function. + + auto & v = *state.allocValue(); state.mkAttrs(v, inputs.flakeInputs.size() + @@ -513,6 +515,14 @@ void callFlake(EvalState & state, v.attrs->push_back(Attr(state.symbols.create("self"), &v)); v.attrs->sort(); + + /* For convenience, put the outputs directly in the result, so you + can refer to an output of an input as 'inputs.foo.bar' rather + than 'inputs.foo.outputs.bar'. 
*/ + auto v2 = *state.allocValue(); + state.eval(state.parseExprFromString("res: res.outputs // res", "/"), v2); + + state.callFunction(v2, v, vRes, noPos); } void callFlake(EvalState & state, diff --git a/tests/flakes.sh b/tests/flakes.sh index 1cd8259b9..c380b405b 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -55,7 +55,7 @@ cat > $flake2Dir/flake.nix < $flake3Dir/flake.nix < $flake3Dir/flake.nix < $flake3Dir/flake.nix < Date: Wed, 5 Jun 2019 16:51:54 +0200 Subject: [PATCH 201/634] Move flake-related stuff to src/libexpr/flake --- src/libexpr/eval.cc | 2 +- src/libexpr/{primops => flake}/flake.cc | 2 +- src/libexpr/{primops => flake}/flake.hh | 0 src/libexpr/{primops => flake}/flakeref.cc | 0 src/libexpr/{primops => flake}/flakeref.hh | 0 src/libexpr/{primops => flake}/lockfile.cc | 0 src/libexpr/{primops => flake}/lockfile.hh | 0 src/libexpr/local.mk | 7 ++++++- src/nix/flake.cc | 2 +- src/nix/installables.cc | 2 +- 10 files changed, 10 insertions(+), 5 deletions(-) rename src/libexpr/{primops => flake}/flake.cc (99%) rename src/libexpr/{primops => flake}/flake.hh (100%) rename src/libexpr/{primops => flake}/flakeref.cc (100%) rename src/libexpr/{primops => flake}/flakeref.hh (100%) rename src/libexpr/{primops => flake}/lockfile.cc (100%) rename src/libexpr/{primops => flake}/lockfile.hh (100%) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 0f8a105b1..46c622ee8 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -7,7 +7,7 @@ #include "eval-inline.hh" #include "download.hh" #include "json.hh" -#include "primops/flake.hh" +#include "flake/flake.hh" #include #include diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/flake/flake.cc similarity index 99% rename from src/libexpr/primops/flake.cc rename to src/libexpr/flake/flake.cc index 793d6da35..bb0543541 100644 --- a/src/libexpr/primops/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -2,7 +2,7 @@ #include "lockfile.hh" #include "primops.hh" #include "eval-inline.hh" -#include "fetchGit.hh" +#include "primops/fetchGit.hh" #include "download.hh" #include "args.hh" diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/flake/flake.hh similarity index 100% rename from src/libexpr/primops/flake.hh rename to src/libexpr/flake/flake.hh diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/flake/flakeref.cc similarity index 100% rename from src/libexpr/primops/flakeref.cc rename to src/libexpr/flake/flakeref.cc diff --git a/src/libexpr/primops/flakeref.hh b/src/libexpr/flake/flakeref.hh similarity index 100% rename from src/libexpr/primops/flakeref.hh rename to src/libexpr/flake/flakeref.hh diff --git a/src/libexpr/primops/lockfile.cc b/src/libexpr/flake/lockfile.cc similarity index 100% rename from src/libexpr/primops/lockfile.cc rename to src/libexpr/flake/lockfile.cc diff --git a/src/libexpr/primops/lockfile.hh b/src/libexpr/flake/lockfile.hh similarity index 100% rename from src/libexpr/primops/lockfile.hh rename to src/libexpr/flake/lockfile.hh diff --git a/src/libexpr/local.mk b/src/libexpr/local.mk index ccd5293e4..a9cb6b7b6 100644 --- a/src/libexpr/local.mk +++ b/src/libexpr/local.mk @@ -4,7 +4,12 @@ libexpr_NAME = libnixexpr libexpr_DIR := $(d) -libexpr_SOURCES := $(wildcard $(d)/*.cc) $(wildcard $(d)/primops/*.cc) $(d)/lexer-tab.cc $(d)/parser-tab.cc +libexpr_SOURCES := \ + $(wildcard $(d)/*.cc) \ + $(wildcard $(d)/primops/*.cc) \ + $(wildcard $(d)/flake/*.cc) \ + $(d)/lexer-tab.cc \ + $(d)/parser-tab.cc libexpr_LIBS = libutil libstore diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 
653154aaa..af1a361b3 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -4,7 +4,7 @@ #include "progress-bar.hh" #include "eval.hh" #include "eval-inline.hh" -#include "primops/flake.hh" +#include "flake/flake.hh" #include "get-drvs.hh" #include "store-api.hh" diff --git a/src/nix/installables.cc b/src/nix/installables.cc index ca88ec0da..a85295a09 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -7,7 +7,7 @@ #include "get-drvs.hh" #include "store-api.hh" #include "shared.hh" -#include "primops/flake.hh" +#include "flake/flake.hh" #include #include From 6644b6099be2d3393206bf1c9c091c888c0a0f57 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 7 Jun 2019 22:25:48 +0200 Subject: [PATCH 202/634] Add flake evaluation cache This exploits the hermetic nature of flake evaluation to speed up repeated evaluations of a flake output attribute. For example (doing 'nix build' on an already present package): $ time nix build nixpkgs:firefox real 0m1.497s user 0m1.160s sys 0m0.139s $ time nix build nixpkgs:firefox real 0m0.052s user 0m0.038s sys 0m0.007s The cache is ~/.cache/nix/eval-cache-v1.sqlite, which has entries like INSERT INTO Attributes VALUES( X'92a907d4efe933af2a46959b082cdff176aa5bfeb47a98fabd234809a67ab195', 'packages.firefox', 1, '/nix/store/pbalzf8x19hckr8cwdv62rd6g0lqgc38-firefox-67.0.drv /nix/store/g6q0gx0v6xvdnizp8lrcw7c4gdkzana0-firefox-67.0 out'); where the hash 92a9... is a fingerprint over the flake store path and the contents of the lockfile. Because flakes are evaluated in pure mode, this uniquely identifies the evaluation result. --- src/libexpr/flake/eval-cache.cc | 111 ++++++++++++++++++++++++ src/libexpr/flake/eval-cache.hh | 39 +++++++++ src/libexpr/flake/flake.cc | 9 ++ src/libexpr/flake/flake.hh | 6 ++ src/libstore/local-store.cc | 9 +- src/libstore/nar-info-disk-cache.cc | 7 +- src/libstore/sqlite.cc | 25 +++++- src/libstore/sqlite.hh | 6 +- src/nix/installables.cc | 130 +++++++++++++++++++++------- 9 files changed, 293 insertions(+), 49 deletions(-) create mode 100644 src/libexpr/flake/eval-cache.cc create mode 100644 src/libexpr/flake/eval-cache.hh diff --git a/src/libexpr/flake/eval-cache.cc b/src/libexpr/flake/eval-cache.cc new file mode 100644 index 000000000..fece1a2b5 --- /dev/null +++ b/src/libexpr/flake/eval-cache.cc @@ -0,0 +1,111 @@ +#include "eval-cache.hh" +#include "sqlite.hh" + +#include + +namespace nix::flake { + +static const char * schema = R"sql( + +create table if not exists Fingerprints ( + fingerprint blob primary key not null, + timestamp integer not null +); + +create table if not exists Attributes ( + fingerprint blob not null, + attrPath text not null, + type integer, + value text, + primary key (fingerprint, attrPath), + foreign key (fingerprint) references Fingerprints(fingerprint) on delete cascade +); +)sql"; + +struct EvalCache::State +{ + SQLite db; + SQLiteStmt insertFingerprint; + SQLiteStmt insertAttribute; + SQLiteStmt queryAttribute; + std::set fingerprints; +}; + +EvalCache::EvalCache() + : _state(std::make_unique>()) +{ + auto state(_state->lock()); + + Path dbPath = getCacheDir() + "/nix/eval-cache-v1.sqlite"; + createDirs(dirOf(dbPath)); + + state->db = SQLite(dbPath); + state->db.isCache(); + state->db.exec(schema); + + state->insertFingerprint.create(state->db, + "insert or ignore into Fingerprints(fingerprint, timestamp) values (?, ?)"); + + state->insertAttribute.create(state->db, + "insert or replace into Attributes(fingerprint, attrPath, type, value) values (?, ?, ?, ?)"); + + 
state->queryAttribute.create(state->db, + "select type, value from Attributes where fingerprint = ? and attrPath = ?"); +} + +enum ValueType { + Derivation = 1, +}; + +void EvalCache::addDerivation( + const Fingerprint & fingerprint, + const std::string & attrPath, + const Derivation & drv) +{ + auto state(_state->lock()); + + if (state->fingerprints.insert(fingerprint).second) + // FIXME: update timestamp + state->insertFingerprint.use() + (fingerprint.hash, fingerprint.hashSize) + (time(0)).exec(); + + state->insertAttribute.use() + (fingerprint.hash, fingerprint.hashSize) + (attrPath) + (ValueType::Derivation) + (drv.drvPath + " " + drv.outPath + " " + drv.outputName).exec(); +} + +std::optional EvalCache::getDerivation( + const Fingerprint & fingerprint, + const std::string & attrPath) +{ + auto state(_state->lock()); + + auto queryAttribute(state->queryAttribute.use() + (fingerprint.hash, fingerprint.hashSize) + (attrPath)); + if (!queryAttribute.next()) return {}; + + // FIXME: handle negative results + + auto type = (ValueType) queryAttribute.getInt(0); + auto s = queryAttribute.getStr(1); + + if (type != ValueType::Derivation) return {}; + + auto ss = tokenizeString>(s, " "); + + debug("evaluation cache hit for '%s'", attrPath); + + return Derivation { ss[0], ss[1], ss[2] }; +} + +EvalCache & EvalCache::singleton() +{ + static std::unique_ptr evalCache(new EvalCache()); + return *evalCache; +} + +} diff --git a/src/libexpr/flake/eval-cache.hh b/src/libexpr/flake/eval-cache.hh new file mode 100644 index 000000000..03aea142e --- /dev/null +++ b/src/libexpr/flake/eval-cache.hh @@ -0,0 +1,39 @@ +#pragma once + +#include "sync.hh" +#include "flake.hh" + +namespace nix { struct SQLite; struct SQLiteStmt; } + +namespace nix::flake { + +class EvalCache +{ + struct State; + + std::unique_ptr> _state; + + EvalCache(); + +public: + + struct Derivation + { + Path drvPath; + Path outPath; + std::string outputName; + }; + + void addDerivation( + const Fingerprint & fingerprint, + const std::string & attrPath, + const Derivation & drv); + + std::optional getDerivation( + const Fingerprint & fingerprint, + const std::string & attrPath); + + static EvalCache & singleton(); +}; + +} diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index bb0543541..0018a0d07 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -601,4 +601,13 @@ const Registries EvalState::getFlakeRegistries() return registries; } +Fingerprint ResolvedFlake::getFingerprint() const +{ + // FIXME: as an optimization, if the flake contains a lockfile and + // we haven't changed it, then it's sufficient to use + // flake.sourceInfo.storePath for the fingerprint. + return hashString(htSHA256, + fmt("%s;%s", flake.sourceInfo.storePath, lockFile)); +} + } diff --git a/src/libexpr/flake/flake.hh b/src/libexpr/flake/flake.hh index b8d0da252..81b6541f0 100644 --- a/src/libexpr/flake/flake.hh +++ b/src/libexpr/flake/flake.hh @@ -83,12 +83,18 @@ struct NonFlake Flake getFlake(EvalState &, const FlakeRef &, bool impureIsAllowed); +/* Fingerprint of a locked flake; used as a cache key. 
*/ +typedef Hash Fingerprint; + struct ResolvedFlake { Flake flake; LockFile lockFile; + ResolvedFlake(Flake && flake, LockFile && lockFile) : flake(flake), lockFile(lockFile) {} + + Fingerprint getFingerprint() const; }; ResolvedFlake resolveFlake(EvalState &, const FlakeRef &, HandleLockFile); diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 485fdd691..f39c73b23 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -294,9 +294,7 @@ void LocalStore::openDB(State & state, bool create) /* Open the Nix database. */ string dbPath = dbDir + "/db.sqlite"; auto & db(state.db); - if (sqlite3_open_v2(dbPath.c_str(), &db.db, - SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), 0) != SQLITE_OK) - throw Error(format("cannot open Nix database '%1%'") % dbPath); + state.db = SQLite(dbPath, create); #ifdef __CYGWIN__ /* The cygwin version of sqlite3 has a patch which calls @@ -308,11 +306,6 @@ void LocalStore::openDB(State & state, bool create) SetDllDirectoryW(L""); #endif - if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK) - throwSQLiteError(db, "setting timeout"); - - db.exec("pragma foreign_keys = 1"); - /* !!! check whether sqlite has been built with foreign key support */ diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc index 32ad7f2b2..3f6dbbcf5 100644 --- a/src/libstore/nar-info-disk-cache.cc +++ b/src/libstore/nar-info-disk-cache.cc @@ -78,12 +78,7 @@ public: state->db = SQLite(dbPath); - if (sqlite3_busy_timeout(state->db, 60 * 60 * 1000) != SQLITE_OK) - throwSQLiteError(state->db, "setting timeout"); - - // We can always reproduce the cache. - state->db.exec("pragma synchronous = off"); - state->db.exec("pragma main.journal_mode = truncate"); + state->db.isCache(); state->db.exec(schema); diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc index a061d64f3..eb1daafc5 100644 --- a/src/libstore/sqlite.cc +++ b/src/libstore/sqlite.cc @@ -25,11 +25,16 @@ namespace nix { throw SQLiteError("%s: %s (in '%s')", fs.s, sqlite3_errstr(exterr), path); } -SQLite::SQLite(const Path & path) +SQLite::SQLite(const Path & path, bool create) { if (sqlite3_open_v2(path.c_str(), &db, - SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0) != SQLITE_OK) + SQLITE_OPEN_READWRITE | (create ? 
SQLITE_OPEN_CREATE : 0), 0) != SQLITE_OK) throw Error(format("cannot open SQLite database '%s'") % path); + + if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK) + throwSQLiteError(db, "setting timeout"); + + exec("pragma foreign_keys = 1"); } SQLite::~SQLite() @@ -42,6 +47,12 @@ SQLite::~SQLite() } } +void SQLite::isCache() +{ + exec("pragma synchronous = off"); + exec("pragma main.journal_mode = truncate"); +} + void SQLite::exec(const std::string & stmt) { retrySQLite([&]() { @@ -94,6 +105,16 @@ SQLiteStmt::Use & SQLiteStmt::Use::operator () (const std::string & value, bool return *this; } +SQLiteStmt::Use & SQLiteStmt::Use::operator () (const unsigned char * data, size_t len, bool notNull) +{ + if (notNull) { + if (sqlite3_bind_blob(stmt, curArg++, data, len, SQLITE_TRANSIENT) != SQLITE_OK) + throwSQLiteError(stmt.db, "binding argument"); + } else + bind(); + return *this; +} + SQLiteStmt::Use & SQLiteStmt::Use::operator () (int64_t value, bool notNull) { if (notNull) { diff --git a/src/libstore/sqlite.hh b/src/libstore/sqlite.hh index 115679b84..78e53fa32 100644 --- a/src/libstore/sqlite.hh +++ b/src/libstore/sqlite.hh @@ -15,13 +15,16 @@ struct SQLite { sqlite3 * db = 0; SQLite() { } - SQLite(const Path & path); + SQLite(const Path & path, bool create = true); SQLite(const SQLite & from) = delete; SQLite& operator = (const SQLite & from) = delete; SQLite& operator = (SQLite && from) { db = from.db; from.db = 0; return *this; } ~SQLite(); operator sqlite3 * () { return db; } + /* Disable synchronous mode, set truncate journal mode. */ + void isCache(); + void exec(const std::string & stmt); }; @@ -52,6 +55,7 @@ struct SQLiteStmt /* Bind the next parameter. */ Use & operator () (const std::string & value, bool notNull = true); + Use & operator () (const unsigned char * data, size_t len, bool notNull = true); Use & operator () (int64_t value, bool notNull = true); Use & bind(); // null diff --git a/src/nix/installables.cc b/src/nix/installables.cc index a85295a09..86e601bc4 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -8,6 +8,7 @@ #include "store-api.hh" #include "shared.hh" #include "flake/flake.hh" +#include "flake/eval-cache.hh" #include #include @@ -110,7 +111,7 @@ struct InstallableValue : Installable InstallableValue(SourceExprCommand & cmd) : cmd(cmd) { } - Buildables toBuildables() override + virtual std::vector toDerivations() { auto state = cmd.getEvalState(); @@ -118,22 +119,36 @@ struct InstallableValue : Installable Bindings & autoArgs = *cmd.getAutoArgs(*state); - DrvInfos drvs; - getDerivations(*state, *v, "", autoArgs, drvs, false); + DrvInfos drvInfos; + getDerivations(*state, *v, "", autoArgs, drvInfos, false); + std::vector res; + for (auto & drvInfo : drvInfos) { + res.push_back({ + drvInfo.queryDrvPath(), + drvInfo.queryOutPath(), + drvInfo.queryOutputName() + }); + } + + return res; + } + + Buildables toBuildables() override + { Buildables res; PathSet drvPaths; - for (auto & drv : drvs) { - Buildable b{drv.queryDrvPath()}; + for (auto & drv : toDerivations()) { + Buildable b{drv.drvPath}; drvPaths.insert(b.drvPath); - auto outputName = drv.queryOutputName(); + auto outputName = drv.outputName; if (outputName == "") throw Error("derivation '%s' lacks an 'outputName' attribute", b.drvPath); - b.outputs.emplace(outputName, drv.queryOutPath()); + b.outputs.emplace(outputName, drv.outPath); res.push_back(std::move(b)); } @@ -254,12 +269,30 @@ struct InstallableFlake : InstallableValue std::string what() override { return 
flakeRef.to_string() + ":" + *attrPaths.begin(); } - Value * toValue(EvalState & state) override + std::vector getActualAttrPaths() + { + std::vector res; + + if (searchPackages) { + // As a convenience, look for the attribute in + // 'outputs.packages'. + res.push_back("packages." + *attrPaths.begin()); + + // As a temporary hack until Nixpkgs is properly converted + // to provide a clean 'packages' set, look in 'legacyPackages'. + res.push_back("legacyPackages." + *attrPaths.begin()); + } + + for (auto & s : attrPaths) + res.push_back(s); + + return res; + } + + Value * getFlakeOutputs(EvalState & state, const flake::ResolvedFlake & resFlake) { auto vFlake = state.allocValue(); - auto resFlake = resolveFlake(state, flakeRef, cmd.getLockFileMode()); - callFlake(state, resFlake, *vFlake); makeFlakeClosureGCRoot(*state.store, flakeRef, resFlake); @@ -268,34 +301,67 @@ struct InstallableFlake : InstallableValue state.forceValue(*vOutputs); - auto emptyArgs = state.allocBindings(0); + return vOutputs; + } - if (searchPackages) { - // As a convenience, look for the attribute in - // 'outputs.packages'. - if (auto aPackages = *vOutputs->attrs->get(state.symbols.create("packages"))) { - try { - auto * v = findAlongAttrPath(state, *attrPaths.begin(), *emptyArgs, *aPackages->value); - state.forceValue(*v); - return v; - } catch (AttrPathNotFound & e) { - } + std::vector toDerivations() override + { + auto state = cmd.getEvalState(); + + auto resFlake = resolveFlake(*state, flakeRef, cmd.getLockFileMode()); + + Value * vOutputs = nullptr; + + auto emptyArgs = state->allocBindings(0); + + auto & evalCache = flake::EvalCache::singleton(); + + auto fingerprint = resFlake.getFingerprint(); + + for (auto & attrPath : getActualAttrPaths()) { + auto drv = evalCache.getDerivation(fingerprint, attrPath); + if (drv) { + if (state->store->isValidPath(drv->drvPath)) + return {*drv}; } - // As a temporary hack until Nixpkgs is properly converted - // to provide a clean 'packages' set, look in 'legacyPackages'. - if (auto aPackages = *vOutputs->attrs->get(state.symbols.create("legacyPackages"))) { - try { - auto * v = findAlongAttrPath(state, *attrPaths.begin(), *emptyArgs, *aPackages->value); - state.forceValue(*v); - return v; - } catch (AttrPathNotFound & e) { - } + if (!vOutputs) + vOutputs = getFlakeOutputs(*state, resFlake); + + try { + auto * v = findAlongAttrPath(*state, attrPath, *emptyArgs, *vOutputs); + state->forceValue(*v); + + auto drvInfo = getDerivation(*state, *v, false); + if (!drvInfo) + throw Error("flake output attribute '%s' is not a derivation", attrPath); + + auto drv = flake::EvalCache::Derivation{ + drvInfo->queryDrvPath(), + drvInfo->queryOutPath(), + drvInfo->queryOutputName() + }; + + evalCache.addDerivation(fingerprint, attrPath, drv); + + return {drv}; + } catch (AttrPathNotFound & e) { } } - // Otherwise, look for it in 'outputs'. 
- for (auto & attrPath : attrPaths) { + throw Error("flake '%s' does not provide attribute %s", + flakeRef, concatStringsSep(", ", quoteStrings(attrPaths))); + } + + Value * toValue(EvalState & state) override + { + auto resFlake = resolveFlake(state, flakeRef, cmd.getLockFileMode()); + + auto vOutputs = getFlakeOutputs(state, resFlake); + + auto emptyArgs = state.allocBindings(0); + + for (auto & attrPath : getActualAttrPaths()) { try { auto * v = findAlongAttrPath(state, attrPath, *emptyArgs, *vOutputs); state.forceValue(*v); From c47d2dac6c7b404714e4c3429f26791790a483f5 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 7 Jun 2019 22:38:39 +0200 Subject: [PATCH 203/634] Disable EvalCache in impure mode --- src/libexpr/flake/eval-cache.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/libexpr/flake/eval-cache.cc b/src/libexpr/flake/eval-cache.cc index fece1a2b5..b32d502f7 100644 --- a/src/libexpr/flake/eval-cache.cc +++ b/src/libexpr/flake/eval-cache.cc @@ -1,5 +1,6 @@ #include "eval-cache.hh" #include "sqlite.hh" +#include "eval.hh" #include @@ -62,6 +63,8 @@ void EvalCache::addDerivation( const std::string & attrPath, const Derivation & drv) { + if (!evalSettings.pureEval) return; + auto state(_state->lock()); if (state->fingerprints.insert(fingerprint).second) @@ -81,6 +84,8 @@ std::optional EvalCache::getDerivation( const Fingerprint & fingerprint, const std::string & attrPath) { + if (!evalSettings.pureEval) return {}; + auto state(_state->lock()); auto queryAttribute(state->queryAttribute.use() From 69b047f4ce8b3c915e13fbe9d6e0b60598a1e786 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 11 Jun 2019 21:32:57 +0200 Subject: [PATCH 204/634] writeRegistry(): Write correct version --- src/libexpr/flake/flake.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index 0018a0d07..8aa989620 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -44,7 +44,7 @@ std::shared_ptr readRegistry(const Path & path) void writeRegistry(const FlakeRegistry & registry, const Path & path) { nlohmann::json json; - json["version"] = 2; + json["version"] = 1; for (auto elem : registry.entries) json["flakes"][elem.first.to_string()] = { {"uri", elem.second.to_string()} }; createDirs(dirOf(path)); From e2d75696857af36232f189f2bee4fccec9b6076f Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 11 Jun 2019 21:45:57 +0200 Subject: [PATCH 205/634] Add a test for GitHub flakes Fixes #2889. 
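For context on the VM test added below: it impersonates github.com, api.github.com and raw.githubusercontent.com behind a self-signed CA, so the fake `api.github.com` virtual host has to serve the same endpoint the GitHub flake fetcher requests. The sketch below is illustrative only (it is not the fetcher's real code, and the helper name is made up): `github:<owner>/<repo>(/<rev-or-ref>)?` flakes are assumed to be fetched as GitHub API tarballs, and the test's `Header set ETag "<rev>"` line exists because the commit hash is recovered from the response's ETag when no explicit revision was given.

```c++
#include <string>

// Hypothetical helper, not the actual fetcher code: shows the API
// endpoint that the fake api.github.com virtual host in this test has
// to serve (urlPath = "/repos/NixOS/nixpkgs/tarball").
static std::string gitHubTarballUrl(
    const std::string & owner,
    const std::string & repo,
    const std::string & revOrRef) // commit hash, branch or tag; "master" if unspecified
{
    return "https://api.github.com/repos/" + owner + "/" + repo
        + "/tarball/" + revOrRef;
}
```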
--- flake.lock | 4 +- release.nix | 5 ++ tests/github-flakes.nix | 145 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 152 insertions(+), 2 deletions(-) create mode 100644 tests/github-flakes.nix diff --git a/flake.lock b/flake.lock index 727d16d73..336e3eb86 100644 --- a/flake.lock +++ b/flake.lock @@ -3,9 +3,9 @@ "nixpkgs": { "id": "nixpkgs", "inputs": {}, - "narHash": "sha256-eYtxncIMFVmOHaHBtTdPGcs/AnJqKqA6tHCm0UmPYQU=", + "narHash": "sha256-5/HXWs25BLukwG9VaxmdmUf/9o79e32aW/tmhcWEbKk=", "nonFlakeInputs": {}, - "uri": "github:edolstra/nixpkgs/e9d5882bb861dc48f8d46960e7c820efdbe8f9c1" + "uri": "github:edolstra/nixpkgs/62ac6f7f504c8d3998558d9b269d22d26f13f1f0" } }, "nonFlakeInputs": {}, diff --git a/release.nix b/release.nix index d28c44910..28bf7897f 100644 --- a/release.nix +++ b/release.nix @@ -228,6 +228,11 @@ let nix = build.x86_64-linux; system = "x86_64-linux"; }); + tests.githubFlakes = (import ./tests/github-flakes.nix rec { + inherit nixpkgs; + nix = build.x86_64-linux; system = "x86_64-linux"; + }); + tests.setuid = pkgs.lib.genAttrs ["i686-linux" "x86_64-linux"] (system: diff --git a/tests/github-flakes.nix b/tests/github-flakes.nix new file mode 100644 index 000000000..2345972ce --- /dev/null +++ b/tests/github-flakes.nix @@ -0,0 +1,145 @@ +{ nixpkgs, system, nix }: + +with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; }; + +let + + # Generate a fake root CA and a fake github.com certificate. + cert = pkgs.runCommand "cert" { buildInputs = [ pkgs.openssl ]; } + '' + mkdir -p $out + + openssl genrsa -out ca.key 2048 + openssl req -new -x509 -days 36500 -key ca.key \ + -subj "/C=NL/ST=Denial/L=Springfield/O=Dis/CN=Root CA" -out $out/ca.crt + + openssl req -newkey rsa:2048 -nodes -keyout $out/server.key \ + -subj "/C=CN/ST=Denial/L=Springfield/O=Dis/CN=github.com" -out server.csr + openssl x509 -req -extfile <(printf "subjectAltName=DNS:api.github.com,DNS:github.com,DNS:raw.githubusercontent.com") \ + -days 36500 -in server.csr -CA $out/ca.crt -CAkey ca.key -CAcreateserial -out $out/server.crt + ''; + + registry = pkgs.writeTextFile { + name = "registry"; + text = '' + { + "flakes": { + "nixpkgs": { + "uri": "github:NixOS/nixpkgs" + } + }, + "version": 1 + } + ''; + destination = "/flake-registry.json"; + }; + + tarball = pkgs.runCommand "nixpkgs-flake" {} + '' + mkdir $out + dir=NixOS-nixpkgs-${nixpkgs.shortRev} + cp -prd ${nixpkgs} $dir + # Set the correct timestamp in the tarball. + find $dir -print0 | xargs -0 touch -t ${builtins.substring 0 12 nixpkgs.lastModified}.${builtins.substring 12 2 nixpkgs.lastModified} -- + tar cfz $out/${nixpkgs.rev} $dir + ln -s ${nixpkgs.rev} $out/master + ''; + +in + +makeTest ( + +{ + + nodes = + { # Impersonate github.com and api.github.com. + github = + { config, pkgs, ... 
}: + { networking.firewall.allowedTCPPorts = [ 80 443 ]; + + services.httpd.enable = true; + services.httpd.adminAddr = "foo@example.org"; + services.httpd.extraConfig = '' + ErrorLog syslog:local6 + ''; + services.httpd.virtualHosts = + [ { hostName = "github.com"; + enableSSL = true; + sslServerKey = "${cert}/server.key"; + sslServerCert = "${cert}/server.crt"; + } + + { hostName = "api.github.com"; + enableSSL = true; + sslServerKey = "${cert}/server.key"; + sslServerCert = "${cert}/server.crt"; + servedDirs = + [ { urlPath = "/repos/NixOS/nixpkgs/tarball"; + dir = tarball; + } + ]; + extraConfig = + '' + Header set ETag "\"${nixpkgs.rev}\"" + ''; + } + + { hostName = "raw.githubusercontent.com"; + enableSSL = true; + sslServerKey = "${cert}/server.key"; + sslServerCert = "${cert}/server.crt"; + servedDirs = + [ { urlPath = "/NixOS/flake-registry/master"; + dir = registry; + } + ]; + } + ]; + }; + + client = + { config, pkgs, nodes, ... }: + { virtualisation.writableStore = true; + virtualisation.pathsInNixDB = [ pkgs.hello pkgs.fuse ]; + nix.package = nix; + nix.binaryCaches = [ ]; + environment.systemPackages = [ pkgs.jq ]; + networking.hosts.${(builtins.head nodes.github.config.networking.interfaces.eth1.ipv4.addresses).address} = + [ "github.com" "api.github.com" "raw.githubusercontent.com" ]; + security.pki.certificateFiles = [ "${cert}/ca.crt" ]; + }; + }; + + testScript = { nodes }: + '' + use POSIX qw(strftime); + + startAll; + + $github->waitForUnit("httpd.service"); + + $client->succeed("curl -v https://github.com/ >&2"); + + $client->succeed("nix flake list | grep nixpkgs"); + + $client->succeed("nix flake info nixpkgs --json | jq -r .revision") eq "${nixpkgs.rev}\n" + or die "revision mismatch"; + + $client->succeed("nix flake pin nixpkgs"); + + $client->succeed("nix flake info nixpkgs --tarball-ttl 0 >&2"); + + # Shut down the web server. The flake should be cached on the client. + $github->succeed("systemctl stop httpd.service"); + + my $date = $client->succeed("nix flake info nixpkgs --json | jq -M .lastModified"); + strftime("%Y%m%d%H%M%S", gmtime($date)) eq "${nixpkgs.lastModified}" or die "time mismatch"; + + $client->succeed("nix build nixpkgs:hello"); + + # The build shouldn't fail even with --tarball-ttl 0 (the server + # being down should not be a fatal error). + $client->succeed("nix build nixpkgs:fuse --tarball-ttl 0"); + ''; + +}) From 06010eaf199005a393f212023ec5e8bc97978537 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 12 Jun 2019 10:34:13 +0200 Subject: [PATCH 206/634] Fix fetchTarball with chroot stores Fixes #2405. 
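For context on the one-line change below: with a chroot store (e.g. `--store ~/my-nix`), the physical location of a downloaded file on disk is not the same as its logical `/nix/store/...` path. The string produced by `fetchTarball`/`fetchurl` must carry the store path (that is what ends up in the string context and in build inputs), while the physical path only needs to be added to the evaluator's allowed paths. A minimal sketch of that distinction, with field names mirroring the ones used in the diff (the struct itself is illustrative, not the exact definition from `download.hh`):

```c++
#include <string>

// Illustrative only: the two paths that a cached download has to expose.
struct CachedDownloadResult
{
    // Physical location on disk; with a chroot store this lives under
    // the chroot, e.g. ~/my-nix/nix/store/....
    std::string path;

    // Logical store path (/nix/store/...), which is what the evaluated
    // string and its context must refer to.
    std::string storePath;
};
```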
--- src/libexpr/primops.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 070e72f3a..a9a1a1e35 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -2083,12 +2083,12 @@ void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v, if (evalSettings.pureEval && !request.expectedHash) throw Error("in pure evaluation mode, '%s' requires a 'sha256' argument", who); - Path res = getDownloader()->downloadCached(state.store, request).path; + auto res = getDownloader()->downloadCached(state.store, request); if (state.allowedPaths) - state.allowedPaths->insert(res); + state.allowedPaths->insert(res.path); - mkString(v, res, PathSet({res})); + mkString(v, res.storePath, PathSet({res.storePath})); } From 415fc233e3f01a3bb3cfac01deb9f4ca4af7fb19 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 13 Jun 2019 14:07:25 +0200 Subject: [PATCH 207/634] For nixpkgs., use legacyPackages This makes commands like 'nix run nixpkgs.chromium' work again. --- src/nix/installables.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 86e601bc4..1c7debf4e 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -411,7 +411,7 @@ std::vector> SourceExprCommand::parseInstallables( bool static warned; warnOnce(warned, "the syntax 'nixpkgs.' is deprecated; use 'nixpkgs:' instead"); result.push_back(std::make_shared(*this, FlakeRef("nixpkgs"), - Strings{"packages." + std::string(s, 8)})); + Strings{"legacyPackages." + std::string(s, 8)})); } else if (auto flakeRef = parseFlakeRef(s, true)) From 8ea842260b4fd93315d35c5ba94b1ff99ab391d8 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 17 Jun 2019 08:43:45 +0200 Subject: [PATCH 208/634] Add '--no-net' convenience flag This flag * Disables substituters. * Sets the tarball-ttl to infinity (ensuring e.g. that the flake registry and any downloaded flakes are considered current). * Disables retrying downloads and sets the connection timeout to the minimum. (So it doesn't completely disable downloads at the moment.) 
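A rough sketch (not the actual downloader code; the helper below is made up) of why raising `tarball-ttl` to the maximum makes everything previously downloaded count as up-to-date, which is what this flag relies on for the flake registry and already-fetched flakes:

```c++
#include <ctime>
#include <limits>

// Hypothetical freshness check: a cached file is reused if it was
// downloaded less than 'tarballTtl' seconds ago.
static bool isFresh(std::time_t lastDownloaded, unsigned int tarballTtl)
{
    return std::time(nullptr) - lastDownloaded
        <= static_cast<std::time_t>(tarballTtl);
}

// With --no-net, tarballTtl becomes std::numeric_limits<unsigned int>::max(),
// so isFresh() holds for any plausible timestamp and no re-download is
// attempted; tries = 0 additionally stops the downloader from retrying
// failed transfers.
```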
--- src/libmain/common-args.cc | 11 +++++++++++ src/libstore/download.cc | 18 +----------------- src/libstore/download.hh | 23 ++++++++++++++++++++++- src/libstore/globals.hh | 2 +- src/libstore/http-binary-cache-store.cc | 1 - 5 files changed, 35 insertions(+), 20 deletions(-) diff --git a/src/libmain/common-args.cc b/src/libmain/common-args.cc index 4c35a4199..94dd092c5 100644 --- a/src/libmain/common-args.cc +++ b/src/libmain/common-args.cc @@ -1,5 +1,6 @@ #include "common-args.hh" #include "globals.hh" +#include "download.hh" namespace nix { @@ -35,6 +36,16 @@ MixCommonArgs::MixCommonArgs(const string & programName) } }); + mkFlag() + .longName("no-net") + .description("disable substituters and consider all previously downloaded files up-to-date") + .handler([]() { + settings.useSubstitutes = false; + settings.tarballTtl = std::numeric_limits::max(); + downloadSettings.tries = 0; + downloadSettings.connectTimeout = 1; + }); + std::string cat = "config"; globalConfig.convertToArgs(*this, cat); diff --git a/src/libstore/download.cc b/src/libstore/download.cc index 0338727c1..5c1705e2f 100644 --- a/src/libstore/download.cc +++ b/src/libstore/download.cc @@ -30,23 +30,7 @@ using namespace std::string_literals; namespace nix { -struct DownloadSettings : Config -{ - Setting enableHttp2{this, true, "http2", - "Whether to enable HTTP/2 support."}; - - Setting userAgentSuffix{this, "", "user-agent-suffix", - "String appended to the user agent in HTTP requests."}; - - Setting httpConnections{this, 25, "http-connections", - "Number of parallel HTTP connections.", - {"binary-caches-parallel-connections"}}; - - Setting connectTimeout{this, 0, "connect-timeout", - "Timeout for connecting to servers during downloads. 0 means use curl's builtin default."}; -}; - -static DownloadSettings downloadSettings; +DownloadSettings downloadSettings; static GlobalConfig::Register r1(&downloadSettings); diff --git a/src/libstore/download.hh b/src/libstore/download.hh index 43b1c5c09..c095ad053 100644 --- a/src/libstore/download.hh +++ b/src/libstore/download.hh @@ -9,13 +9,34 @@ namespace nix { +struct DownloadSettings : Config +{ + Setting enableHttp2{this, true, "http2", + "Whether to enable HTTP/2 support."}; + + Setting userAgentSuffix{this, "", "user-agent-suffix", + "String appended to the user agent in HTTP requests."}; + + Setting httpConnections{this, 25, "http-connections", + "Number of parallel HTTP connections.", + {"binary-caches-parallel-connections"}}; + + Setting connectTimeout{this, 0, "connect-timeout", + "Timeout for connecting to servers during downloads. 
0 means use curl's builtin default."}; + + Setting tries{this, 5, "download-attempts", + "How often Nix will attempt to download a file before giving up."}; +}; + +extern DownloadSettings downloadSettings; + struct DownloadRequest { std::string uri; std::string expectedETag; bool verifyTLS = true; bool head = false; - size_t tries = 5; + size_t tries = downloadSettings.tries; unsigned int baseRetryTimeMs = 250; ActivityId parentAct; bool decompress = true; diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index 80d70fba3..2aecebe3d 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -255,7 +255,7 @@ public: "Secret keys with which to sign local builds."}; Setting tarballTtl{this, 60 * 60, "tarball-ttl", - "How soon to expire files fetched by builtins.fetchTarball and builtins.fetchurl."}; + "How long downloaded files are considered up-to-date."}; Setting requireSigs{this, true, "require-sigs", "Whether to check that any non-content-addressed path added to the " diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index 105e1dcdd..11c34fdac 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -84,7 +84,6 @@ protected: try { DownloadRequest request(cacheUri + "/" + path); request.head = true; - request.tries = 5; getDownloader()->download(request); return true; } catch (DownloadError & e) { From 615a9d031d22a6aee64f8511e15685e47b6f8796 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 17 Jun 2019 09:12:03 +0200 Subject: [PATCH 209/634] Add "warning" verbosity level This ensures that "nix" shows warnings. Previously these were hidden because they were at "info" level. --- src/libutil/logging.cc | 3 ++- src/libutil/logging.hh | 1 + src/nix/main.cc | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc index 799c6e1ae..b379306f6 100644 --- a/src/libutil/logging.cc +++ b/src/libutil/logging.cc @@ -21,7 +21,7 @@ Logger * logger = makeDefaultLogger(); void Logger::warn(const std::string & msg) { - log(lvlInfo, ANSI_RED "warning:" ANSI_NORMAL " " + msg); + log(lvlWarn, ANSI_RED "warning:" ANSI_NORMAL " " + msg); } class SimpleLogger : public Logger @@ -46,6 +46,7 @@ public: char c; switch (lvl) { case lvlError: c = '3'; break; + case lvlWarn: c = '4'; break; case lvlInfo: c = '5'; break; case lvlTalkative: case lvlChatty: c = '6'; break; default: c = '7'; diff --git a/src/libutil/logging.hh b/src/libutil/logging.hh index 678703102..5f2219445 100644 --- a/src/libutil/logging.hh +++ b/src/libutil/logging.hh @@ -6,6 +6,7 @@ namespace nix { typedef enum { lvlError = 0, + lvlWarn, lvlInfo, lvlTalkative, lvlChatty, diff --git a/src/nix/main.cc b/src/nix/main.cc index a1fcb892a..ced87f653 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -102,7 +102,7 @@ void mainWrapped(int argc, char * * argv) if (legacy) return legacy(argc, argv); } - verbosity = lvlError; + verbosity = lvlWarn; settings.verboseBuild = false; evalSettings.pureEval = true; From 04a59769963fe2a28d10ba15de743fe499333c80 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 17 Jun 2019 09:57:22 +0200 Subject: [PATCH 210/634] Automatically use --no-net if there are no network interfaces --- src/libmain/common-args.cc | 11 -------- src/nix/main.cc | 55 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 11 deletions(-) diff --git a/src/libmain/common-args.cc b/src/libmain/common-args.cc index 94dd092c5..4c35a4199 100644 --- 
a/src/libmain/common-args.cc +++ b/src/libmain/common-args.cc @@ -1,6 +1,5 @@ #include "common-args.hh" #include "globals.hh" -#include "download.hh" namespace nix { @@ -36,16 +35,6 @@ MixCommonArgs::MixCommonArgs(const string & programName) } }); - mkFlag() - .longName("no-net") - .description("disable substituters and consider all previously downloaded files up-to-date") - .handler([]() { - settings.useSubstitutes = false; - settings.tarballTtl = std::numeric_limits::max(); - downloadSettings.tries = 0; - downloadSettings.connectTimeout = 1; - }); - std::string cat = "config"; globalConfig.convertToArgs(*this, cat); diff --git a/src/nix/main.cc b/src/nix/main.cc index ced87f653..101ed5d5c 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -8,19 +8,52 @@ #include "shared.hh" #include "store-api.hh" #include "progress-bar.hh" +#include "download.hh" #include "finally.hh" +#include +#include +#include +#include + extern std::string chrootHelperName; void chrootHelper(int argc, char * * argv); namespace nix { +/* Check if we have a non-loopback/link-local network interface. */ +static bool haveInternet() +{ + struct ifaddrs * addrs; + + if (getifaddrs(&addrs)) + return true; + + Finally free([&]() { freeifaddrs(addrs); }); + + for (auto i = addrs; i; i = i->ifa_next) { + if (!i->ifa_addr) continue; + if (i->ifa_addr->sa_family == AF_INET) { + if (ntohl(((sockaddr_in *) i->ifa_addr)->sin_addr.s_addr) != INADDR_LOOPBACK) { + return true; + } + } else if (i->ifa_addr->sa_family == AF_INET6) { + if (!IN6_IS_ADDR_LOOPBACK(((sockaddr_in6 *) i->ifa_addr)->sin6_addr.s6_addr) && + !IN6_IS_ADDR_LINKLOCAL(((sockaddr_in6 *) i->ifa_addr)->sin6_addr.s6_addr)) + return true; + } + } + + return false; +} + std::string programPath; struct NixArgs : virtual MultiCommand, virtual MixCommonArgs { bool printBuildLogs = false; + bool useNet = true; NixArgs() : MultiCommand(*RegisterCommand::commands), MixCommonArgs("nix") { @@ -52,6 +85,11 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs .longName("version") .description("show version information") .handler([&]() { printVersion(programName); }); + + mkFlag() + .longName("no-net") + .description("disable substituters and consider all previously downloaded files up-to-date") + .handler([&]() { useNet = false; }); } void printFlags(std::ostream & out) override @@ -118,6 +156,23 @@ void mainWrapped(int argc, char * * argv) startProgressBar(args.printBuildLogs); + if (args.useNet && !haveInternet()) { + warn("you don't have Internet access; disabling some network-dependent features"); + args.useNet = false; + } + + if (!args.useNet) { + // FIXME: should check for command line overrides only. + if (!settings.useSubstitutes.overriden) + settings.useSubstitutes = false; + if (!settings.tarballTtl.overriden) + settings.tarballTtl = std::numeric_limits::max(); + if (!downloadSettings.tries.overriden) + downloadSettings.tries = 0; + if (!downloadSettings.connectTimeout.overriden) + downloadSettings.connectTimeout = 1; + } + args.command->prepare(); args.command->run(); } From 2467c9837500b26aab5c1dcd3cac12cda44898ca Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 17 Jun 2019 16:58:59 +0200 Subject: [PATCH 211/634] nix app: Search for installable in the 'apps' output I.e. 
you can write $ nix app blender-bin:blender_2_80 which is equivalent to $ nix app blender-bin:apps.blender_2_80 --- src/nix/command.hh | 12 ++++++++++++ src/nix/installables.cc | 27 +++++++++++++-------------- src/nix/run.cc | 5 +++++ 3 files changed, 30 insertions(+), 14 deletions(-) diff --git a/src/nix/command.hh b/src/nix/command.hh index 659b724c3..3dad64947 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -102,6 +102,18 @@ struct SourceExprCommand : virtual Args, EvalCommand, MixFlakeOptions { return {"defaultPackage"}; } + + virtual Strings getDefaultFlakeAttrPathPrefixes() + { + return { + // As a convenience, look for the attribute in + // 'outputs.packages'. + "packages.", + // As a temporary hack until Nixpkgs is properly converted + // to provide a clean 'packages' set, look in 'legacyPackages'. + "legacyPackages." + }; + } }; enum RealiseMode { Build, NoBuild, DryRun }; diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 1c7debf4e..d7dd95606 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -257,14 +257,16 @@ struct InstallableFlake : InstallableValue { FlakeRef flakeRef; Strings attrPaths; - bool searchPackages = false; + Strings prefixes; InstallableFlake(SourceExprCommand & cmd, FlakeRef && flakeRef, Strings attrPaths) : InstallableValue(cmd), flakeRef(flakeRef), attrPaths(std::move(attrPaths)) { } - InstallableFlake(SourceExprCommand & cmd, FlakeRef && flakeRef, std::string attrPath) - : InstallableValue(cmd), flakeRef(flakeRef), attrPaths{attrPath}, searchPackages(true) + InstallableFlake(SourceExprCommand & cmd, FlakeRef && flakeRef, + std::string attrPath, Strings && prefixes) + : InstallableValue(cmd), flakeRef(flakeRef), attrPaths{attrPath}, + prefixes(prefixes) { } std::string what() override { return flakeRef.to_string() + ":" + *attrPaths.begin(); } @@ -273,15 +275,8 @@ struct InstallableFlake : InstallableValue { std::vector res; - if (searchPackages) { - // As a convenience, look for the attribute in - // 'outputs.packages'. - res.push_back("packages." + *attrPaths.begin()); - - // As a temporary hack until Nixpkgs is properly converted - // to provide a clean 'packages' set, look in 'legacyPackages'. - res.push_back("legacyPackages." 
+ *attrPaths.begin()); - } + for (auto & prefix : prefixes) + res.push_back(prefix + *attrPaths.begin()); for (auto & s : attrPaths) res.push_back(s); @@ -421,7 +416,11 @@ std::vector> SourceExprCommand::parseInstallables( else if ((colon = s.rfind(':')) != std::string::npos) { auto flakeRef = std::string(s, 0, colon); auto attrPath = std::string(s, colon + 1); - result.push_back(std::make_shared(*this, FlakeRef(flakeRef, true), attrPath)); + result.push_back(std::make_shared( + *this, + FlakeRef(flakeRef, true), + attrPath, + getDefaultFlakeAttrPathPrefixes())); } else if (s.find('/') != std::string::npos || s == ".") { @@ -437,7 +436,7 @@ std::vector> SourceExprCommand::parseInstallables( } else - result.push_back(std::make_shared(*this, FlakeRef("nixpkgs"), s)); + throw Error("unsupported argument '%s'", s); } } diff --git a/src/nix/run.cc b/src/nix/run.cc index 00a682832..62aae12f6 100644 --- a/src/nix/run.cc +++ b/src/nix/run.cc @@ -225,6 +225,11 @@ struct CmdApp : InstallableCommand, RunCommon return {"defaultApp"}; } + Strings getDefaultFlakeAttrPathPrefixes() override + { + return {"apps."}; + } + void run(ref store) override { auto state = getEvalState(); From 0d69f7f3f012aceb4c494f3c1cc866b378c5eac1 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 17 Jun 2019 17:05:37 +0200 Subject: [PATCH 212/634] nix app: Accept arguments Example: $ nix app blender-bin -- --version Blender 2.80 (sub 74) --- src/nix/run.cc | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/nix/run.cc b/src/nix/run.cc index 62aae12f6..d30851d47 100644 --- a/src/nix/run.cc +++ b/src/nix/run.cc @@ -196,8 +196,11 @@ static RegisterCommand r1(make_ref()); struct CmdApp : InstallableCommand, RunCommon { + std::vector args; + CmdApp() { + expectArgs("args", &args); } std::string name() override @@ -238,7 +241,10 @@ struct CmdApp : InstallableCommand, RunCommon state->realiseContext(app.context); - runProgram(store, app.program, {app.program}); + Strings allArgs{app.program}; + for (auto & i : args) allArgs.push_back(i); + + runProgram(store, app.program, allArgs); } }; From 3b2ebd029cac0b1ce715d8aca44204130c93a869 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 17 Jun 2019 17:31:34 +0200 Subject: [PATCH 213/634] nix flake info --json: Revive enumerating the outputs --- src/nix/flake.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index af1a361b3..6f6d3c130 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -234,8 +234,8 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON if (json) { auto json = flakeToJson(flake); -#if 0 auto state = getEvalState(); + auto flake = resolveFlake(); auto vFlake = state->allocValue(); flake::callFlake(*state, flake, *vFlake); @@ -257,7 +257,6 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON }); json["outputs"] = std::move(outputs); -#endif std::cout << json.dump() << std::endl; } else From 9d1207c02c091a00454fcb7266653d18a6023923 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 17 Jun 2019 17:59:57 +0200 Subject: [PATCH 214/634] nix flake check: Check apps --- src/libexpr/primops.cc | 14 +++++++------- src/nix/command.hh | 2 ++ src/nix/flake.cc | 29 +++++++++++++++++++++++++++-- src/nix/installables.cc | 23 +++++++++++------------ 4 files changed, 47 insertions(+), 21 deletions(-) diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index a9a1a1e35..10ce1abf5 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -51,21 +51,20 @@ void EvalState::realiseContext(const 
PathSet & context) PathSet drvs; for (auto & i : context) { - std::pair decoded = decodeContext(i); - Path ctx = decoded.first; + auto [ctx, outputName] = decodeContext(i); assert(store->isStorePath(ctx)); if (!store->isValidPath(ctx)) throw InvalidPathError(ctx); - if (!decoded.second.empty() && nix::isDerivation(ctx)) { - drvs.insert(decoded.first + "!" + decoded.second); + if (!outputName.empty() && nix::isDerivation(ctx)) { + drvs.insert(ctx + "!" + outputName); /* Add the output of this derivation to the allowed paths. */ if (allowedPaths) { - auto drv = store->derivationFromPath(decoded.first); - DerivationOutputs::iterator i = drv.outputs.find(decoded.second); + auto drv = store->derivationFromPath(ctx); + DerivationOutputs::iterator i = drv.outputs.find(outputName); if (i == drv.outputs.end()) - throw Error("derivation '%s' does not have an output named '%s'", decoded.first, decoded.second); + throw Error("derivation '%s' does not have an output named '%s'", ctx, outputName); allowedPaths->insert(i->second.path); } } @@ -80,6 +79,7 @@ void EvalState::realiseContext(const PathSet & context) PathSet willBuild, willSubstitute, unknown; unsigned long long downloadSize, narSize; store->queryMissing(drvs, willBuild, willSubstitute, unknown, downloadSize, narSize); + store->buildPaths(drvs); } diff --git a/src/nix/command.hh b/src/nix/command.hh index 3dad64947..0ffbe46f5 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -43,6 +43,8 @@ struct App PathSet context; Path program; // FIXME: add args, sandbox settings, metadata, ... + + App(EvalState & state, Value & vApp); }; struct Installable diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 6f6d3c130..6f6d1a0aa 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -7,6 +7,7 @@ #include "flake/flake.hh" #include "get-drvs.hh" #include "store-api.hh" +#include "derivations.hh" #include #include @@ -301,13 +302,27 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON // FIXME: check meta attributes return drvInfo->queryDrvPath(); } catch (Error & e) { - e.addPrefix(fmt("while checking flake output attribute '" ANSI_BOLD "%s" ANSI_NORMAL "':\n", attrPath)); + e.addPrefix(fmt("while checking the derivation '" ANSI_BOLD "%s" ANSI_NORMAL "':\n", attrPath)); throw; } }; PathSet drvPaths; + auto checkApp = [&](const std::string & attrPath, Value & v) { + try { + auto app = App(*state, v); + for (auto & i : app.context) { + auto [drvPath, outputName] = decodeContext(i); + if (!outputName.empty() && nix::isDerivation(drvPath)) + drvPaths.insert(drvPath + "!" + outputName); + } + } catch (Error & e) { + e.addPrefix(fmt("while checking the app definition '" ANSI_BOLD "%s" ANSI_NORMAL "':\n", attrPath)); + throw; + } + }; + { Activity act(*logger, lvlInfo, actUnknown, "evaluating flake"); @@ -337,9 +352,19 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON name + "." + (std::string) aCheck.name, *aCheck.value); } + else if (name == "apps") { + state->forceAttrs(vProvide); + for (auto & aCheck : *vProvide.attrs) + checkApp( + name + "." 
+ (std::string) aCheck.name, *aCheck.value); + } + else if (name == "defaultPackage" || name == "devShell") checkDerivation(name, vProvide); + else if (name == "defaultApp") + checkApp(name, vProvide); + } catch (Error & e) { e.addPrefix(fmt("while checking flake output '" ANSI_BOLD "%s" ANSI_NORMAL "':\n", name)); throw; @@ -347,7 +372,7 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON }); } - if (build) { + if (build && !drvPaths.empty()) { Activity act(*logger, lvlInfo, actUnknown, "running flake checks"); store->buildPaths(drvPaths); } diff --git a/src/nix/installables.cc b/src/nix/installables.cc index d7dd95606..feaf57f0c 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -69,26 +69,25 @@ Buildable Installable::toBuildable() return std::move(buildables[0]); } -App Installable::toApp(EvalState & state) +App::App(EvalState & state, Value & vApp) { - auto v = toValue(state); + state.forceAttrs(vApp); - state.forceAttrs(*v); - - auto aType = v->attrs->need(state.sType); + auto aType = vApp.attrs->need(state.sType); if (state.forceStringNoCtx(*aType.value, *aType.pos) != "app") throw Error("value does not have type 'app', at %s", *aType.pos); - App app; - - auto aProgram = v->attrs->need(state.symbols.create("program")); - app.program = state.forceString(*aProgram.value, app.context, *aProgram.pos); + auto aProgram = vApp.attrs->need(state.symbols.create("program")); + program = state.forceString(*aProgram.value, context, *aProgram.pos); // FIXME: check that 'program' is in the closure of 'context'. - if (!state.store->isInStore(app.program)) - throw Error("app program '%s' is not in the Nix store", app.program); + if (!state.store->isInStore(program)) + throw Error("app program '%s' is not in the Nix store", program); +} - return app; +App Installable::toApp(EvalState & state) +{ + return App(state, *toValue(state)); } struct InstallableStorePath : Installable From f2fcc163fa6c38566f3a78cd3757ab22f39821fc Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 17 Jun 2019 18:05:32 +0200 Subject: [PATCH 215/634] nix flake check: Warn about unknown flake outputs --- src/nix/flake.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 6f6d1a0aa..8248a5d7b 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -365,6 +365,9 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON else if (name == "defaultApp") checkApp(name, vProvide); + else + warn("unknown flake output '%s'", name); + } catch (Error & e) { e.addPrefix(fmt("while checking flake output '" ANSI_BOLD "%s" ANSI_NORMAL "':\n", name)); throw; From 556f33422d0a064ef100fe232f60b42b34bf40e2 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 18 Jun 2019 09:45:14 +0200 Subject: [PATCH 216/634] nix flake check: Ignore legacyPackages --- src/nix/flake.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 8248a5d7b..b98cce078 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -365,6 +365,10 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON else if (name == "defaultApp") checkApp(name, vProvide); + else if (name == "legacyPackages") + // FIXME: do getDerivations? 
+ ; + else warn("unknown flake output '%s'", name); From 8a6704d826578d5640d3fca347308298020a6fde Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Sun, 26 May 2019 15:59:50 +0200 Subject: [PATCH 217/634] Updated documentation --- doc/flakes/design.md | 555 ++++++++++++++++++------------------------- src/nix/flake.cc | 8 - 2 files changed, 235 insertions(+), 328 deletions(-) diff --git a/doc/flakes/design.md b/doc/flakes/design.md index 63198e577..c9520bcbf 100644 --- a/doc/flakes/design.md +++ b/doc/flakes/design.md @@ -2,92 +2,83 @@ ## Goals -* To provide Nix repositories with an easy and standard way to - reference other Nix repositories. +* Standard and easy way for Nix repos to reference other Nix repos as + dependencies -* To allow such references to be queried and updated automatically. +* Discoverability: Be able to query and update these references to Nix repos + automatically -* To provide a replacement for `nix-channel`, `NIX_PATH` and Hydra - jobset definitions. +* To provide a replacement for `nix-channel`, `NIX_PATH` and Hydra jobset + definitions -* To enable reproducible, hermetic evaluation of packages and NixOS - configurations. +* Reproducibility: Evaluate packages and NixOS configurations hermetic by + default -Things that we probably won't do in the initial iteration: +Upcoming but not yet implemented: -* Sophisticated flake versioning, such as the ability to specify - version ranges on dependencies. +* Sophisticated flake versioning, such as the ability to specify version ranges + on dependencies. -* A way to specify the types of values provided by a flake. For the - most part, flakes can provide arbitrary Nix values, but there will - be some standard attribute names (e.g. `packages` must be a set of - installable derivations). +* A way to specify the types of values provided by a flake. For the most part, + flakes can provide arbitrary Nix values, but there will be some standard + attribute names (e.g. `packages` must be a set of installable derivations). ## Overview -* A flake is (usually) a Git repository that contains a file named - `flake.nix` at top-level. +* A flake is (usually) a Git repository that contains a file named `flake.nix` + at top-level -* Flakes *provide* an attribute set of values, such as packages, - Nixpkgs overlays, NixOS modules, library functions, Hydra jobs, - `nix-shell` definitions, etc. +* A flake *provides* an attribute set of values, such as packages, Nixpkgs + overlays, NixOS modules, library functions, Hydra jobs, `nix-shell` + definitions, etc. -* Flakes can *depend* on other flakes. +* Flakes can *depend* on other flakes or other repositories which aren't flakes -* Flakes are referred to using a *flake reference*, which is either a - URL specifying its repository's location - (e.g. `github:NixOS/nixpkgs/release-18.09`) or an identifier - (e.g. `nixpkgs`) looked up in a *lock file* or *flake - registry*. They can also specify revisions, - e.g. `github:NixOS/nixpkgs/98a2a5b5370c1e2092d09cb38b9dcff6d98a109f`. +* Flakes are referred to using a *flake reference*, which is either a URL + specifying its repository's location or an identifier looked up in a *lock + file* or *flake registry*. -* The *flake registry* is a centrally maintained mapping (on - `nixos.org`) from flake identifiers to flake locations - (e.g. `nixpkgs -> github:NixOS/nixpkgs/release-18.09`). +* A *flake registry* is a mapping from flake identifiers to flake locations + (e.g. `nixpkgs -> github:NixOS/nixpkgs/release-18.09`). 
There is a centrally + maintained flake registry on `nixos.org`. -* A flake can contain a *lock file* (`flake.lock`) used when resolving - the dependencies in `flake.nix`. It maps flake references to - references containing revisions (e.g. `nixpkgs -> +* A flake can contain a *lock file* (`flake.lock`) used when resolving the + dependencies in `flake.nix`. It maps mutable flake references + (e.g. `github:NixOS/nixpkgs/release-18.09`) to references containing revisions + (e.g. `nixpkgs -> github:NixOS/nixpkgs/98a2a5b5370c1e2092d09cb38b9dcff6d98a109f`). -* The `nix` command uses the flake registry as its default - installation source. For example, `nix build nixpkgs.hello` builds the - `hello` package provided by the `nixpkgs` flake listed in the - registry. `nix` will automatically download/upload the registry and - flakes as needed. +* The `nix` command uses the flake registry as its default installation source. + For example, `nix build nixpkgs.hello` builds the `hello` package provided by + the `nixpkgs` flake listed in the registry. `nix` will automatically + download/upload the registry and flakes as needed. * `nix build` without arguments will build the flake in the current directory (or some parent). -* The command `nix flake update` generates/updates `flake.lock` from - `flake.nix`. This should probably also be done automatically when - building from a local flake. +* `nix flake update` generates `flake.lock` from `flake.nix`, ignoring the old + lockfile. -* `nixos-rebuild` will build a configuration from a (locked) - flake. Evaluation will be done in pure mode to ensure there are no - unaccounted inputs. Thus the NixOS configuration can be reproduced - unambiguously from the top-level flake. +* `nixos-rebuild` will build a configuration from a (locked) flake. Evaluation + is done in pure mode to ensure there are no unaccounted inputs. Thus the + NixOS configuration can be reproduced unambiguously from the top-level flake. -* Nix code can query flake metadata such as `commitHash` (the Git - revision) or `date` (the date of the last commit). This is useful - for NixOS to compute the NixOS version string (which will be the - revision of the top-level configuration flake, uniquely identifying - the configuration). +* Nix code can query flake metadata such as `commitHash` (the Git revision) or + `epoch` (the date of the last commit). This is useful for NixOS to compute + the NixOS version string (which will be the revision of the top-level + configuration flake, uniquely identifying the configuration). -* Hydra jobset configurations will consist of a single flake - reference. Thus we can get rid of jobset inputs; any other needed - repositories can be fetched by the top-level flake. The top-level - flake can be locked or unlocked; if some dependencies are unlocked, - then Nix will fetch the latest revision for each. +* Hydra jobset configurations will consist of a single flake reference. Thus we + can get rid of jobset inputs; any other needed repositories can be fetched by + the top-level flake. The top-level flake can be locked or unlocked; if some + dependencies are unlocked, then Nix will fetch the latest revision for each. ## Example flake -A flake is a Git repository that contains a file named -`flake.nix`. For example, here is the `flake.nix` for `dwarffs`, a -small repository that provides a single package and a single NixOS -module. +Let us look at an example of a `flake.nix` file, here for `dwarffs`, a small +repository that provides a single package and a single NixOS module. 
```nix { @@ -101,23 +92,26 @@ module. # Some other metadata. description = "A filesystem that fetches DWARF debug info from the Internet on demand"; - # A list of flake references denoting the flakes that this flake - # depends on. Nix will resolve and fetch these flakes and pass them - # as a function argument to `outputs` below. + # The flake dependencies. Nix will resolve and fetch these flakes and pass + # them as a function argument to `outputs` below. # - # `flake:nixpkgs` denotes a flake named `nixpkgs` which is looked up + # "nixpkgs" denotes a flake named `nixpkgs` which is looked up # in the flake registry, or in `flake.lock` inside this flake, if it # exists. inputs = [ flake:nixpkgs ]; + # An attribute set listing dependencies which aren't flakes, also to be passed as + # a function argument to `provides`. + nonFlakeRequires = {}; + # The stuff provided by this flake. Flakes can provide whatever they # want (convention over configuration), but some attributes have - # special meaning to tools / other flakes: for example, `packages` + # special meaning to tools / other flakes. For example, `packages` # is used by the `nix` CLI to search for packages, and # `nixosModules` is used by NixOS to automatically pull in the # modules provided by a flake. # - # `outputs` takes a single argument named `deps` that contains + # `outputs` takes a single argument (`deps`) that contains # the resolved set of flakes. (See below.) outputs = deps: { @@ -153,7 +147,11 @@ module. nixosModules.dwarffs = import ./module.nix deps; # Provide a single Hydra job (`hydraJobs.dwarffs`). - hydraJobs = deps.this.packages; + hydraJobs.build.x86_64-linux = packages.dwarffs; + + # A bunch of things which can be checked (through `nix flake check`) to + # make sure the flake is well-defined. + checks.build = packages.dwarffs; }; } ``` @@ -170,8 +168,11 @@ Similarly, a minimal `flake.nix` for Nixpkgs: outputs = deps: let pkgs = import ./. {}; in + let pkgs = import ./. { system = "x86_64-linux"; }; in { - lib = import ./lib; + lib = (import ./lib) // { + nixosSystem = import ./nixos/lib/eval-config.nix; + }; builders = { inherit (pkgs) stdenv fetchurl; @@ -180,145 +181,124 @@ Similarly, a minimal `flake.nix` for Nixpkgs: packages = { inherit (pkgs) hello nix fuse nlohmann_json boost; }; + + legacyPkgs = pkgs; }; } ``` -Note that `packages` is an unpolluted set of packages: non-package -values like `lib` or `fetchurl` are not part of it. +Note that `packages` is an unpolluted set of packages: non-package values like +`lib` or `fetchurl` are not part of it. +## Flake registries -## Flake identifiers - -A flake has an identifier (e.g. `nixpkgs` or `dwarffs`). +Note: If a flake registry contains an entry `nixpkgs -> github:NixOS/nixpkgs`, +then `nixpkgs/release-18.09` will match to become +`github:NixOS/nixpkgs/release-18.09`. This is referred to as "fuzzymatching". ## Flake references -Flake references are a URI-like syntax to specify the physical -location of a flake (e.g. a Git repository) or to denote a lookup in -the flake registry or lock file. +Flake references are a URI-like syntax to specify the physical location of a +flake (e.g. a Git repository) or to denote a lookup in the flake registry or +lock file. There are four options for the syntax: -* `(flake:)?(/rev-or-ref(/rev)?)?` +* Flake aliases + A flake alias is a name which requires a lookup in a flake + registry or lock file. - Look up a flake by ID in the flake lock file or in the flake - registry. 
These must specify an actual location for the flake using - the formats listed below. Note that in pure evaluation mode, the - flake registry is empty. + Example: "nixpkgs" - Optionally, the `rev` or `ref` from the dereferenced flake can be - overriden. For example, +* GitHub repositories + A repository which is stored on GitHub can easily be fetched using this type. + Note: + * Only the code in this particular commit is downloaded, not the entire repo + * By default, the commit to download is the last commit on the `master` branch. + See later for how to change this. - > nixpkgs/19.09 + Example: `github:NixOS/nixpkgs` - uses the `19.09` branch of the `nixpkgs` flake's GitHub repository, - while - - > nixpkgs/98a2a5b5370c1e2092d09cb38b9dcff6d98a109f - - uses the specified revision. For Git (rather than GitHub) - repositories, both the rev and ref must be given, e.g. - - > nixpkgs/19.09/98a2a5b5370c1e2092d09cb38b9dcff6d98a109f - -* `github:/(/)?` - - A repository on GitHub. These differ from Git references in that - they're downloaded in a efficient way (via the tarball mechanism) - and that they support downloading a specific revision without - specifying a branch. `rev-or-ref` is either a commit hash (`rev`) - or a branch or tag name (`ref`). The default is `master` if none is - specified. Note that in pure evaluation mode, a commit hash must be - used. - - Flakes fetched in this manner expose `rev` and `date` attributes, - but not `revCount`. - - Examples: - - > github:edolstra/dwarffs - - > github:edolstra/dwarffs/unstable - - > github:edolstra/dwarffs/41c0c1bf292ea3ac3858ff393b49ca1123dbd553 - -* > https:///.git(\?attr(&attr)*)? - - > ssh:///.git(\?attr(&attr)*)? - - > git:///.git(\?attr(&attr)*)? - - > file:///(\?attr(&attr)*)? - - where `attr` is one of `rev=` or `ref=`. - - A Git repository fetched through https. Note that the path must end - in `.git`. The default for `ref` is `master`. - - Examples: - - > https://example.org/my/repo.git - > https://example.org/my/repo.git?ref=release-1.2.3 - > https://example.org/my/repo.git?rev=e72daba8250068216d79d2aeef40d4d95aff6666 - -* > /path.git(\?attr(&attr)*)? - - Like `file://path.git`, but if no `ref` or `rev` is specified, the - (possibly dirty) working tree will be used. Using a working tree is - not allowed in pure evaluation mode. +* `ssh/https/git/file` + These are generic `FlakeRef`s for downloadding git repositories or tarballs. Examples: + - https://example.org/my/repo.git + - ssh://git@github.com:NixOS/nix.git + - git://github.com/edolstra/dwarffs.git + - file:///home/my-user/some-repo/some-repo.git + - https://releases.nixos.org/nixos/unstable/nixos-19.03pre167858.f2a1a4e93be/nixexprs.tar.xz + - file:///.tar.xz - > /path/to/my/repo +* Local, dirty paths + This `FlakeRef` is the equivalent of `file://` used for dirty paths. - > /path/to/my/repo?ref=develop + Example: /path/to/my/repo - > /path/to/my/repo?rev=e72daba8250068216d79d2aeef40d4d95aff6666 - -* > https:///.tar.xz(?hash=) - - > file:///.tar.xz(?hash=) - - A flake distributed as a tarball. In pure evaluation mode, an SRI - hash is mandatory. It exposes a `date` attribute, being the newest - file inside the tarball. 
- - Example: - - > https://releases.nixos.org/nixos/unstable/nixos-19.03pre167858.f2a1a4e93be/nixexprs.tar.xz - - > https://releases.nixos.org/nixos/unstable/nixos-19.03pre167858.f2a1a4e93be/nixexprs.tar.xz?hash=sha256-56bbc099995ea8581ead78f22832fee7dbcb0a0b6319293d8c2d0aef5379397c - -Note: currently, there can be only one flake per Git repository, and -it must be at top-level. In the future, we may want to add a field -(e.g. `dir=`) to specify a subdirectory inside the repository. +Notes: +- Each FlakeRef (except for the Path option) allows for a Git revision (i.e. + commit hash) and/or referenceo(i.e. git branch name) to be added. For + tarbals, an SRI hash needs to be added. + Examples: + * `"nixpkgs/release-18.09"` + * `github:NixOS/nixpkgs/1e9e709953e315ab004951248b186ac8e2306451` + * `git://github.com/edolstra/dwarffs.git?ref=flake&rev=2efca4bc9da70fb001b26c3dc858c6397d3c4817` + * file:///.tar.xz(?hash=) +- In full pure mode, no mutable `FlakeRef`s can be used + * No aliases, because they need to be looked up + * `github` requires a specified `rev` + * `ssh/https/git/file` require a specified `ref` _and_ `rev` + * `path` is always mutable +- Flakes don't need to be top-level, but can also reside in a subdirectory. This is shown by adding `dir=` to the `FlakeRef`. + Example: `./foo?dir=bar` ## Flake lock files -This is a JSON file named `flake.lock` that maps flake identifiers -used in the corresponding `flake.nix` to "immutable" flake references; -that is, flake references that contain a revision (for Git -repositories) or a content hash (for tarballs). +A lockfile is a JSON file named `flake.lock` which contains a forrest of +entries mapping `FlakeRef`s to the immutable `FlakeRef` they were resolved to. Example: ```json { - "nixpkgs": "github:NixOS/nixpkgs/41c0c1bf292ea3ac3858ff393b49ca1123dbd553", - "foo": "https://example.org/foo.tar.xz?hash=sha256-56bbc099995ea8581ead78f22832fee7dbcb0a0b6319293d8c2d0aef5379397c" + "nixpkgs": { + "uri": "github:NixOS/nixpkgs/41c0c1bf292ea3ac3858ff393b49ca1123dbd553", + "content-hash": "sha256-vy2UmXQM66aS/Kn2tCtjt9RwxfBvV+nQVb5tJQFwi8E=" + }, + "foo": { + "uri": "https://example.org/foo.tar.xz?hash=sha256-56bbc099995ea8581ead78f22832fee7dbcb0a0b6319293d8c2d0aef5379397c", + "content-hash": "sha256-vy2UmXQM66aS/Kn2tCtjt9RwxfBvV+nQVb5tJQFwi8E=" + } } ``` +Lockfiles are used to help resolve the dependencies of a flake. +- `nix build github:<..>` uses the remote lockfile and update it +- `nix build /home/user/dwarffs` uses the local lockfile, updates it and writes the result to file +- `nix flake update ` recreates the lockfile from scratch and writes it to file +- `--no-registries` makes the command pure, also when fetching dependencies +- `--no-save-lock-file`: Several commands will update the lockfile (e.g. `nix + build`). This flag prevents the updated lockfile to be written to file. +- `--recreate-lock-file` makes prevents the current lockfile from being used ## `outputs` -The flake attribute `outputs` is a function that takes an argument -named `deps` and returns a (mostly) arbitrary attrset of values. Some -of the standard result attributes: +The function argument `deps` is an attrset containing all dependencies listed +in `requires` and `nonFlakeRequires` as well as `path` (for the flake's source +code) and an attribute `meta` with: +- `description` +- `commitHash` (not for tarball flakes): The Git commit hash. 
+- `date`: The timestamp of the most recent commit (for Git repos), or of the + most recently modified file (for tarballs) +- `revCount` (for Git flakes, but not GitHub flakes): The number of ancestors + of the revision. Useful for generating version strings. -* `packages`: A set of installable derivations used by the `nix` - command. That is, commands such as `nix install` ignore all other - flake attributes. +The flake attribute `outputs` is a function that takes an argument named `deps` +and returns an attribute set. Some of the members of this set have protected +names: + +* `packages`: A set of installable derivations used by the `nix` command. That + is, commands such as `nix install` ignore all other flake attributes. It + cannot be a nested set. * `hydraJobs`: Used by Hydra. @@ -329,213 +309,155 @@ of the standard result attributes: we need to avoid a situation where `nixos-rebuild` needs to fetch its own `nixpkgs` just to do `evalModules`.) -* `devShell`: A specification of a development environment in some TBD - format. +* `devShell`: A derivation to create a development environment -The function argument `flakes` is an attrset that contains an -attribute for each dependency specified in `inputs`. (Should it -contain transitive dependencies? Probably not.) Each attribute is an -attrset containing the `outputs` of the dependency, in addition to -the following attributes: - -* `path`: The path to the flake's source code. Useful when you want to - use non-Nix artifacts from the flake, or if you want to *store* the - source code of the dependency in a derivation. (For example, we - could store the sources of all flake dependencies in a NixOS system - configuration, as a generalization of - `system.copySystemConfiguration`.) - -* `meta`: An attrset containing the following: - - * `description` - - * `commitHash` (or `rev`?) (not for tarball flakes): The Git commit - hash. - - * `date`: The timestamp of the most recent commit (for Git - repositories), or the timestamp of the most recently modified file - (for tarballs). - - * `revCount` (for Git flakes, but not GitHub flakes): The number of - ancestors of the revision. Useful for generating version strings. - - -## Non-flake dependencies - -It may be useful to pull in repositories that are not flakes -(i.e. don't contain a `flake.nix`). This could be done in two ways: - -* Allow flakes not to have a `flake.nix` file, in which case it's a - flake with no inputs and no outputs. The downside of this - approach is that we can't detect accidental use of a non-flake - repository. (Also, we need to conjure up an identifier somehow.) - -* Add a flake attribute to specifiy non-flake dependencies, e.g. - - > nonFlakeInputs.foobar = github:foo/bar; +* `self`: The result of the flake's output which is passed to itself + Example: `self.outputs.foo` works. ## Flake registry -The flake registry maps flake IDs to flake references (where the -latter cannot be another indirection, i.e. it must not be a -`flake:` reference). - -The default registry is kept at -`https://nixos.org/flake-registry.json`. It looks like this: +A flake registry is a JSON file mapping flake references to flake references. 
+The default/global registry is kept at +`https://github.com/NixOS/flake-registry/blob/master/flake-registry.json` and +looks like this: ```json { - "version": 1, "flakes": { "dwarffs": { "uri": "github:edolstra/dwarffs/flake" }, + "nix": { + "uri": "github:NixOS/nix/flakes" + }, "nixpkgs": { - "uri": "github:NixOS/nixpkgs/release-18.09" + "uri": "github:edolstra/nixpkgs/release-19.03" + }, + "hydra": { + "uri": "github:NixOS/hydra/flake" + }, + "patchelf": { + "uri": "github:NixOS/patchelf" } - } + }, + "version": 1 } ``` -Nix automatically (re)downloads the registry. The downloaded file is a -GC root so the registry remains available if nixos.org is unreachable. -TBD: when to redownload? +Nix automatically (re)downloads this file whenever you have network access. The +downloaded file is a GC root so the registry remains available if nixos.org is +unreachable. + +In addition to a global registry, there is also a user registry stored in +`~/.config/nix/registry.json`. ## Nix UI -Commands for registry / user flake configuration: +There is a list of new commands added to the `nix` CLI: -* `nix flake list`: Show all flakes in the registry. +* `nix flake list`: Show all flakes in the registry -* `nix flake add `: Add or override a flake to/in the - user's flake configuration (`~/.config/nix/flakes.nix`). For - example, `nix flake add nixpkgs/nixos-18.03` overrides the `nixpkgs` - flake to use the `nixos-18.03` branch. There should also be a way to - add multiple branches/revisions of the same flake by giving them a - different ID, e.g. `nix flake add --id nixpkgs-ancient - nixpkgs/nixos-16.03`). +* `nix flake add `: Add or override a flake + to/in the user flake registry. -* `nix flake remove `: Remove a flake from the user's flake - configuration. Any flake with the same ID in the registry remains - available. +* `nix flake remove `: Remove a FlakeRef from the user flake + registry. -* `nix flake lock `: Lock a flake. For example, `nix flake - lock nixpkgs` pins `nixpkgs` to the current revision. +* `nix flake pin `: Look up to which immutable FlakeRef the + alias FlakeRef maps to currently, and store that map in the user registry. + Example: `nix flake pin github:NixOS/nixpkgs` will create an entry + `github:NixOS/nixpkgs -> + github:NixOS/nixpkgs/444f22ca892a873f76acd88d5d55bdc24ed08757`. -Commands for creating/modifying a flake: +* `nix flake init`: Create a `flake.nix` in the current directory -* `nix flake init`: Create a `flake.nix` in the current directory. - -* `nix flake update`: Update the lock file for the `flake.nix` in the - current directory. In most cases, this should be done - automatically. (E.g. `nix build` should automatically update the - lock file is a new dependency is added to `flake.nix`.) +* `nix flake update`: Recreate the lock file from scratch, from the `flake.nix`. * `nix flake check`: Do some checks on the flake, e.g. check that all `packages` are really packages. -* `nix flake clone`: Do a Git clone of the flake repository. This is a - convenience to easily start hacking on a flake. E.g. `nix flake - clone dwarffs` clones the `dwarffs` GitHub repository to `./dwarffs`. - -TODO: maybe the first set of commands should have a different name -from the second set. +* `nix flake clone`: `git clone` the flake repo Flags / configuration options: -* `--flakes (=)*`: add/override some flakes. +* `--flakes (=)*`: add/override some + FlakeRef -* (In `nix`) `--flake `: set the specified flake as the - installation source. E.g. `nix build --flake ./my-nixpkgs hello`. 
+* `--flake `: set the specified flake as the installation source + E.g. `nix build --flake ./my-nixpkgs hello`. -The default installation source in `nix` is the `packages` from all -flakes in the registry, that is: +The default installation source in `nix` is the `packages` from all flakes in +the registry, that is: ``` builtins.mapAttrs (flakeName: flakeInfo: (getFlake flakeInfo.uri).${flakeName}.outputs.packages or {}) builtins.flakeRegistry ``` -(where `builtins.flakeRegistry` is the global registry with user -overrides applied, and `builtins.getFlake` downloads a flake and -resolves its dependencies.) - -It may be nice to extend the default installation source with the -`packages` from the flake in the current directory, so that - -> nix build hello - -does something similar to the old - -> nix-build -A hello - -Specifically, it builds `packages.hello` from the flake in the current -directory. Of course, this creates some ambiguity if there is a flake -in the registry named `hello`. - -Maybe the command - -> nix dev-shell - -should do something like use `outputs.devShell` to initialize the -shell, but probably we should ditch `nix shell` / `nix-shell` for -direnv. +where `builtins.flakeRegistry` is the global registry with user overrides +applied, and `builtins.getFlake` downloads a flake and resolves its +dependencies. ## Pure evaluation and caching -Flake evaluation should be done in pure mode. Thus: +Flake evaluation is done in pure mode. Thus: -* Flakes cannot do `NIX_PATH` lookups via the `<...>` syntax. +* Flakes cannot use `NIX_PATH` via the `<...>` syntax. -* They can't read random stuff from non-flake directories, such as +* Flakes cannot read random stuff from non-flake directories, such as `~/.nix/config.nix` or overlays. -This enables aggressive caching or precomputation of Nixpkgs package -sets. For example, for a particular Nixpkgs flake closure (as -identified by, say, a hash of the fully-qualified flake references -after dependency resolution) and system type, an attribute like -`packages.hello` should always evaluate to the same derivation. So we -can: +This enables aggressive caching or precomputation of Nixpkgs package sets. For +example, for a particular Nixpkgs flake closure (as identified by, say, a hash +of the fully-qualified flake references after dependency resolution) and system +type, an attribute like `packages.hello` should always evaluate to the same +derivation. So we can: -* Keep a local evaluation cache (say `~/.cache/nix/eval.sqlite`) +* Keep a local evaluation cache (say `~/.cache/nix/eval-cache-v1.sqlite`) mapping `() -> (, , )`. -* Download a precomputed cache - (e.g. `https://releases.nixos.org/eval/.sqlite`). So - a command like `nix search` could avoid evaluating Nixpkgs entirely. +* Download a precomputed cache, e.g. + `https://releases.nixos.org/eval/.sqlite`. So a command + like `nix search` could avoid evaluating Nixpkgs entirely. -Of course, this doesn't allow overlays. With pure evaluation, the only -way to have these is to define a top-level flake that depends on the -Nixpkgs flake and somehow passes in a set of overlays. - -TODO: in pure mode we have to pass the system type explicitly! +Of course, this doesn't allow overlays. With pure evaluation, the only way to +have these is to define a top-level flake that depends on the Nixpkgs flake and +somehow passes in a set of overlays. ## Hydra jobset dependencies -Hydra can use the flake dependency resolution mechanism to fetch -dependencies. 
This allows us to get rid of jobset configuration in the -web interface: a jobset only requires a flake reference. That is, *a -jobset is a flake*. Hydra then just builds the `hydraJobs` attrset -`provide`d by the flake. (It omitted, maybe it can build `packages`.) +Hydra can use the flake dependency resolution mechanism to fetch dependencies. +This allows us to get rid of jobset configuration in the web interface: a +jobset only requires a flake reference. That is, a jobset *is* a flake. Hydra +then just builds the `hydraJobs` attrset ## NixOS system configuration -NixOS currently contains a lot of modules that really should be moved -into their own repositories. For example, it contains a Hydra module -that duplicates the one in the Hydra repository. Also, we want -reproducible evaluation for NixOS system configurations. So NixOS -system configurations should be stored as flakes in (local) Git -repositories. +NixOS currently contains a lot of modules that really should be moved into +their own repositories. For example, it contains a Hydra module that duplicates +the one in the Hydra repository. Also, we want reproducible evaluation for +NixOS system configurations. So NixOS system configurations should be stored as +flakes in (local) Git repositories. `my-system/flake.nix`: - ```nix { + name = "my-system"; + + epoch = 201906; + + inputs = + [ "nixpkgs/nixos-18.09" + "dwarffs" + "hydra" + ... lots of other module flakes ... + ]; + outputs = flakes: { nixosSystems.default = flakes.nixpkgs.lib.evalModules { @@ -549,13 +471,6 @@ repositories. ]; }; }; - - inputs = - [ "nixpkgs/nixos-18.09" - "dwarffs" - "hydra" - ... lots of other module flakes ... - ]; } ``` @@ -563,5 +478,5 @@ We can then build the system: ``` nixos-rebuild switch --flake ~/my-system ``` -This performs dependency resolution starting at `~/my-system/flake.nix` -and builds the `system` attribute in `nixosSystems.default`. +This performs dependency resolution starting at `~/my-system/flake.nix` and +builds the `system` attribute in `nixosSystems.default`. diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 8d6716391..b673ca73e 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -128,14 +128,6 @@ static void printNonFlakeInfo(const NonFlake & nonFlake) printSourceInfo(nonFlake.sourceInfo); } -static nlohmann::json nonFlakeToJson(const NonFlake & nonFlake) -{ - nlohmann::json j; - j["id"] = nonFlake.alias; - sourceInfoToJson(nonFlake.sourceInfo, j); - return j; -} - // FIXME: merge info CmdFlakeInfo? 
struct CmdFlakeDeps : FlakeCommand { From a0de58f471c9087d8e6cc60a6078f9940a125b15 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 18 Jun 2019 16:01:35 +0200 Subject: [PATCH 218/634] Make subcommand construction in MultiCommand lazy --- src/libutil/args.cc | 15 ++++---- src/libutil/args.hh | 11 ++++-- src/nix/add-to-store.cc | 7 +--- src/nix/build.cc | 7 +--- src/nix/cat.cc | 14 +------ src/nix/command.cc | 2 +- src/nix/command.hh | 15 ++++++-- src/nix/copy.cc | 7 +--- src/nix/doctor.cc | 7 +--- src/nix/dump-path.cc | 7 +--- src/nix/edit.cc | 7 +--- src/nix/eval.cc | 7 +--- src/nix/flake.cc | 79 ++++++-------------------------------- src/nix/hash.cc | 26 +++---------- src/nix/log.cc | 11 +----- src/nix/ls.cc | 14 +------ src/nix/optimise-store.cc | 11 +----- src/nix/path-info.cc | 7 +--- src/nix/ping-store.cc | 7 +--- src/nix/repl.cc | 4 +- src/nix/run.cc | 14 +------ src/nix/search.cc | 7 +--- src/nix/shell.cc | 16 +------- src/nix/show-config.cc | 11 +----- src/nix/show-derivation.cc | 7 +--- src/nix/sigs.cc | 14 +------ src/nix/upgrade-nix.cc | 7 +--- src/nix/verify.cc | 7 +--- src/nix/why-depends.cc | 7 +--- 29 files changed, 73 insertions(+), 282 deletions(-) diff --git a/src/libutil/args.cc b/src/libutil/args.cc index 2837dacc9..217495c26 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -215,17 +215,15 @@ void Command::printHelp(const string & programName, std::ostream & out) } } -MultiCommand::MultiCommand(const std::vector> & _commands) +MultiCommand::MultiCommand(const Commands & commands) + : commands(commands) { - for (auto & command : _commands) - commands.emplace(command->name(), command); - expectedArgs.push_back(ExpectedArg{"command", 1, true, [=](std::vector ss) { assert(!command); auto i = commands.find(ss[0]); if (i == commands.end()) throw UsageError("'%s' is not a recognised command", ss[0]); - command = i->second; + command = i->second(); }}); } @@ -246,10 +244,11 @@ void MultiCommand::printHelp(const string & programName, std::ostream & out) out << "Available commands:\n"; Table2 table; - for (auto & command : commands) { - auto descr = command.second->description(); + for (auto & i : commands) { + auto command = i.second(); + auto descr = command->description(); if (!descr.empty()) - table.push_back(std::make_pair(command.second->name(), descr)); + table.push_back(std::make_pair(command->name(), descr)); } printTable(out, table); } diff --git a/src/libutil/args.hh b/src/libutil/args.hh index bf69bf4b6..8497eaf71 100644 --- a/src/libutil/args.hh +++ b/src/libutil/args.hh @@ -192,7 +192,12 @@ public: run() method. */ struct Command : virtual Args { - virtual std::string name() = 0; +private: + std::string _name; + +public: + std::string name() { return _name; } + virtual void prepare() { }; virtual void run() = 0; @@ -209,7 +214,7 @@ struct Command : virtual Args void printHelp(const string & programName, std::ostream & out) override; }; -typedef std::map> Commands; +typedef std::map()>> Commands; /* An argument parser that supports multiple subcommands, i.e. ‘ ’. 
*/ @@ -220,7 +225,7 @@ public: std::shared_ptr command; - MultiCommand(const std::vector> & commands); + MultiCommand(const Commands & commands); void printHelp(const string & programName, std::ostream & out) override; diff --git a/src/nix/add-to-store.cc b/src/nix/add-to-store.cc index e86b96e3f..296b2c7e4 100644 --- a/src/nix/add-to-store.cc +++ b/src/nix/add-to-store.cc @@ -22,11 +22,6 @@ struct CmdAddToStore : MixDryRun, StoreCommand .dest(&namePart); } - std::string name() override - { - return "add-to-store"; - } - std::string description() override { return "add a path to the Nix store"; @@ -58,4 +53,4 @@ struct CmdAddToStore : MixDryRun, StoreCommand } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("add-to-store"); diff --git a/src/nix/build.cc b/src/nix/build.cc index c08ec0e62..d8ce8cc80 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -25,11 +25,6 @@ struct CmdBuild : MixDryRun, InstallablesCommand .set(&outLink, Path("")); } - std::string name() override - { - return "build"; - } - std::string description() override { return "build a derivation or fetch a store path"; @@ -72,4 +67,4 @@ struct CmdBuild : MixDryRun, InstallablesCommand } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("build"); diff --git a/src/nix/cat.cc b/src/nix/cat.cc index a35f640d8..851f90abd 100644 --- a/src/nix/cat.cc +++ b/src/nix/cat.cc @@ -28,11 +28,6 @@ struct CmdCatStore : StoreCommand, MixCat expectArg("path", &path); } - std::string name() override - { - return "cat-store"; - } - std::string description() override { return "print the contents of a store file on stdout"; @@ -54,11 +49,6 @@ struct CmdCatNar : StoreCommand, MixCat expectArg("path", &path); } - std::string name() override - { - return "cat-nar"; - } - std::string description() override { return "print the contents of a file inside a NAR file"; @@ -70,5 +60,5 @@ struct CmdCatNar : StoreCommand, MixCat } }; -static RegisterCommand r1(make_ref()); -static RegisterCommand r2(make_ref()); +static auto r1 = registerCommand("cat-store"); +static auto r2 = registerCommand("cat-nar"); diff --git a/src/nix/command.cc b/src/nix/command.cc index 5967ab36c..89fa0cba4 100644 --- a/src/nix/command.cc +++ b/src/nix/command.cc @@ -4,7 +4,7 @@ namespace nix { -std::vector> * RegisterCommand::commands = 0; +Commands * RegisterCommand::commands = nullptr; StoreCommand::StoreCommand() { diff --git a/src/nix/command.hh b/src/nix/command.hh index 0ffbe46f5..59c6f8578 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -190,15 +190,22 @@ struct StorePathCommand : public InstallablesCommand /* A helper class for registering commands globally. 
*/ struct RegisterCommand { - static std::vector> * commands; + static Commands * commands; - RegisterCommand(ref command) + RegisterCommand(const std::string & name, + std::function()> command) { - if (!commands) commands = new std::vector>; - commands->push_back(command); + if (!commands) commands = new Commands; + commands->emplace(name, command); } }; +template +static RegisterCommand registerCommand(const std::string & name) +{ + return RegisterCommand(name, [](){ return make_ref(); }); +} + Buildables build(ref store, RealiseMode mode, std::vector> installables); diff --git a/src/nix/copy.cc b/src/nix/copy.cc index 96bd453d8..5f9051f40 100644 --- a/src/nix/copy.cc +++ b/src/nix/copy.cc @@ -42,11 +42,6 @@ struct CmdCopy : StorePathsCommand .set(&substitute, Substitute); } - std::string name() override - { - return "copy"; - } - std::string description() override { return "copy paths between Nix stores"; @@ -97,4 +92,4 @@ struct CmdCopy : StorePathsCommand } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("copy"); diff --git a/src/nix/doctor.cc b/src/nix/doctor.cc index 7b5444619..f2cf04758 100644 --- a/src/nix/doctor.cc +++ b/src/nix/doctor.cc @@ -20,11 +20,6 @@ struct CmdDoctor : StoreCommand { bool success = true; - std::string name() override - { - return "doctor"; - } - std::string description() override { return "check your system for potential problems"; @@ -121,4 +116,4 @@ struct CmdDoctor : StoreCommand } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("doctore"); diff --git a/src/nix/dump-path.cc b/src/nix/dump-path.cc index f411c0cb7..90f1552d9 100644 --- a/src/nix/dump-path.cc +++ b/src/nix/dump-path.cc @@ -5,11 +5,6 @@ using namespace nix; struct CmdDumpPath : StorePathCommand { - std::string name() override - { - return "dump-path"; - } - std::string description() override { return "dump a store path to stdout (in NAR format)"; @@ -33,4 +28,4 @@ struct CmdDumpPath : StorePathCommand } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("dump-path"); diff --git a/src/nix/edit.cc b/src/nix/edit.cc index c9671f76d..c62b35c46 100644 --- a/src/nix/edit.cc +++ b/src/nix/edit.cc @@ -10,11 +10,6 @@ using namespace nix; struct CmdEdit : InstallableCommand { - std::string name() override - { - return "edit"; - } - std::string description() override { return "open the Nix expression of a Nix package in $EDITOR"; @@ -78,4 +73,4 @@ struct CmdEdit : InstallableCommand } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("edit"); diff --git a/src/nix/eval.cc b/src/nix/eval.cc index b7058361c..524bac304 100644 --- a/src/nix/eval.cc +++ b/src/nix/eval.cc @@ -18,11 +18,6 @@ struct CmdEval : MixJSON, InstallableCommand mkFlag(0, "raw", "print strings unquoted", &raw); } - std::string name() override - { - return "eval"; - } - std::string description() override { return "evaluate a Nix expression"; @@ -74,4 +69,4 @@ struct CmdEval : MixJSON, InstallableCommand } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("eval"); diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 6fc56827f..91c6b4276 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -49,11 +49,6 @@ public: struct CmdFlakeList : EvalCommand { - std::string name() override - { - return "list"; - } - std::string description() override { return "list available Nix flakes"; @@ -133,11 +128,6 @@ static void printNonFlakeInfo(const NonFlake & nonFlake) // FIXME: merge info CmdFlakeInfo? 
struct CmdFlakeDeps : FlakeCommand { - std::string name() override - { - return "deps"; - } - std::string description() override { return "list informaton about dependencies"; @@ -171,11 +161,6 @@ struct CmdFlakeDeps : FlakeCommand struct CmdFlakeUpdate : FlakeCommand { - std::string name() override - { - return "update"; - } - std::string description() override { return "update flake lock file"; @@ -209,11 +194,6 @@ static void enumerateOutputs(EvalState & state, Value & vFlake, struct CmdFlakeInfo : FlakeCommand, MixJSON { - std::string name() override - { - return "info"; - } - std::string description() override { return "list info about a given flake"; @@ -269,11 +249,6 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON .set(&build, false); } - std::string name() override - { - return "check"; - } - std::string description() override { return "check whether the flake evaluates and run its tests"; @@ -383,11 +358,6 @@ struct CmdFlakeAdd : MixEvalArgs, Command FlakeUri alias; FlakeUri uri; - std::string name() override - { - return "add"; - } - std::string description() override { return "upsert flake in user flake registry"; @@ -414,11 +384,6 @@ struct CmdFlakeRemove : virtual Args, MixEvalArgs, Command { FlakeUri alias; - std::string name() override - { - return "remove"; - } - std::string description() override { return "remove flake from user flake registry"; @@ -442,11 +407,6 @@ struct CmdFlakePin : virtual Args, EvalCommand { FlakeUri alias; - std::string name() override - { - return "pin"; - } - std::string description() override { return "pin flake require in user flake registry"; @@ -482,11 +442,6 @@ struct CmdFlakePin : virtual Args, EvalCommand struct CmdFlakeInit : virtual Args, Command { - std::string name() override - { - return "init"; - } - std::string description() override { return "create a skeleton 'flake.nix' file in the current directory"; @@ -514,11 +469,6 @@ struct CmdFlakeClone : FlakeCommand { Path destDir; - std::string name() override - { - return "clone"; - } - std::string description() override { return "clone flake repository"; @@ -541,25 +491,20 @@ struct CmdFlakeClone : FlakeCommand struct CmdFlake : virtual MultiCommand, virtual Command { CmdFlake() - : MultiCommand({make_ref() - , make_ref() - , make_ref() - , make_ref() - //, make_ref() - , make_ref() - , make_ref() - , make_ref() - , make_ref() - , make_ref() - }) + : MultiCommand({ + {"list", []() { return make_ref(); }}, + {"update", []() { return make_ref(); }}, + {"info", []() { return make_ref(); }}, + {"check", []() { return make_ref(); }}, + {"add", []() { return make_ref(); }}, + {"remove", []() { return make_ref(); }}, + {"pin", []() { return make_ref(); }}, + {"init", []() { return make_ref(); }}, + {"clone", []() { return make_ref(); }}, + }) { } - std::string name() override - { - return "flake"; - } - std::string description() override { return "manage Nix flakes"; @@ -578,4 +523,4 @@ struct CmdFlake : virtual MultiCommand, virtual Command } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("flake"); diff --git a/src/nix/hash.cc b/src/nix/hash.cc index af4105e28..1b3ba729e 100644 --- a/src/nix/hash.cc +++ b/src/nix/hash.cc @@ -26,11 +26,6 @@ struct CmdHash : Command expectArgs("paths", &paths); } - std::string name() override - { - return mode == mFile ? 
"hash-file" : "hash-path"; - } - std::string description() override { return mode == mFile @@ -49,8 +44,8 @@ struct CmdHash : Command } }; -static RegisterCommand r1(make_ref(CmdHash::mFile)); -static RegisterCommand r2(make_ref(CmdHash::mPath)); +static RegisterCommand r1("hash-file", [](){ return make_ref(CmdHash::mFile); }); +static RegisterCommand r2("hash-path", [](){ return make_ref(CmdHash::mPath); }); struct CmdToBase : Command { @@ -66,15 +61,6 @@ struct CmdToBase : Command expectArgs("strings", &args); } - std::string name() override - { - return - base == Base16 ? "to-base16" : - base == Base32 ? "to-base32" : - base == Base64 ? "to-base64" : - "to-sri"; - } - std::string description() override { return fmt("convert a hash to %s representation", @@ -91,10 +77,10 @@ struct CmdToBase : Command } }; -static RegisterCommand r3(make_ref(Base16)); -static RegisterCommand r4(make_ref(Base32)); -static RegisterCommand r5(make_ref(Base64)); -static RegisterCommand r6(make_ref(SRI)); +static RegisterCommand r3("to-base16", [](){ return make_ref(Base16); }); +static RegisterCommand r4("to-base32", [](){ return make_ref(Base32); }); +static RegisterCommand r5("to-base64", [](){ return make_ref(Base64); }); +static RegisterCommand r6("to-sri", [](){ return make_ref(SRI); }); /* Legacy nix-hash command. */ static int compatNixHash(int argc, char * * argv) diff --git a/src/nix/log.cc b/src/nix/log.cc index f07ec4e93..122a3d690 100644 --- a/src/nix/log.cc +++ b/src/nix/log.cc @@ -8,15 +8,6 @@ using namespace nix; struct CmdLog : InstallableCommand { - CmdLog() - { - } - - std::string name() override - { - return "log"; - } - std::string description() override { return "show the build log of the specified packages or paths, if available"; @@ -68,4 +59,4 @@ struct CmdLog : InstallableCommand } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("log"); diff --git a/src/nix/ls.cc b/src/nix/ls.cc index d089be42f..9408cc9da 100644 --- a/src/nix/ls.cc +++ b/src/nix/ls.cc @@ -100,11 +100,6 @@ struct CmdLsStore : StoreCommand, MixLs }; } - std::string name() override - { - return "ls-store"; - } - std::string description() override { return "show information about a store path"; @@ -136,11 +131,6 @@ struct CmdLsNar : Command, MixLs }; } - std::string name() override - { - return "ls-nar"; - } - std::string description() override { return "show information about the contents of a NAR file"; @@ -152,5 +142,5 @@ struct CmdLsNar : Command, MixLs } }; -static RegisterCommand r1(make_ref()); -static RegisterCommand r2(make_ref()); +static auto r1 = registerCommand("ls-store"); +static auto r2 = registerCommand("ls-nar"); diff --git a/src/nix/optimise-store.cc b/src/nix/optimise-store.cc index 725fb75a1..fed012b04 100644 --- a/src/nix/optimise-store.cc +++ b/src/nix/optimise-store.cc @@ -8,15 +8,6 @@ using namespace nix; struct CmdOptimiseStore : StoreCommand { - CmdOptimiseStore() - { - } - - std::string name() override - { - return "optimise-store"; - } - std::string description() override { return "replace identical files in the store by hard links"; @@ -38,4 +29,4 @@ struct CmdOptimiseStore : StoreCommand } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("optimise-store"); diff --git a/src/nix/path-info.cc b/src/nix/path-info.cc index dea5f0557..2cb718f12 100644 --- a/src/nix/path-info.cc +++ b/src/nix/path-info.cc @@ -24,11 +24,6 @@ struct CmdPathInfo : StorePathsCommand, MixJSON mkFlag(0, "sigs", "show signatures", &showSigs); } - std::string name() 
override - { - return "path-info"; - } - std::string description() override { return "query information about store paths"; @@ -130,4 +125,4 @@ struct CmdPathInfo : StorePathsCommand, MixJSON } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("path-info"); diff --git a/src/nix/ping-store.cc b/src/nix/ping-store.cc index 310942574..3a2e542a3 100644 --- a/src/nix/ping-store.cc +++ b/src/nix/ping-store.cc @@ -6,11 +6,6 @@ using namespace nix; struct CmdPingStore : StoreCommand { - std::string name() override - { - return "ping-store"; - } - std::string description() override { return "test whether a store can be opened"; @@ -32,4 +27,4 @@ struct CmdPingStore : StoreCommand } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("ping-store"); diff --git a/src/nix/repl.cc b/src/nix/repl.cc index d8f812149..ce17a2b9f 100644 --- a/src/nix/repl.cc +++ b/src/nix/repl.cc @@ -764,8 +764,6 @@ struct CmdRepl : StoreCommand, MixEvalArgs expectArgs("files", &files); } - std::string name() override { return "repl"; } - std::string description() override { return "start an interactive environment for evaluating Nix expressions"; @@ -779,6 +777,6 @@ struct CmdRepl : StoreCommand, MixEvalArgs } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("repl"); } diff --git a/src/nix/run.cc b/src/nix/run.cc index d30851d47..9c15b6749 100644 --- a/src/nix/run.cc +++ b/src/nix/run.cc @@ -99,11 +99,6 @@ struct CmdRun : InstallablesCommand, RunCommon .handler([&](std::vector ss) { unset.insert(ss.front()); }); } - std::string name() override - { - return "run"; - } - std::string description() override { return "run a shell in which the specified packages are available"; @@ -192,7 +187,7 @@ struct CmdRun : InstallablesCommand, RunCommon } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("run"); struct CmdApp : InstallableCommand, RunCommon { @@ -203,11 +198,6 @@ struct CmdApp : InstallableCommand, RunCommon expectArgs("args", &args); } - std::string name() override - { - return "app"; - } - std::string description() override { return "run a Nix application"; @@ -248,7 +238,7 @@ struct CmdApp : InstallableCommand, RunCommon } }; -static RegisterCommand r2(make_ref()); +static auto r2 = registerCommand("app"); void chrootHelper(int argc, char * * argv) { diff --git a/src/nix/search.cc b/src/nix/search.cc index 55f8d106a..70de717d1 100644 --- a/src/nix/search.cc +++ b/src/nix/search.cc @@ -52,11 +52,6 @@ struct CmdSearch : SourceExprCommand, MixJSON .handler([&]() { writeCache = false; useCache = false; }); } - std::string name() override - { - return "search"; - } - std::string description() override { return "query available packages"; @@ -282,4 +277,4 @@ struct CmdSearch : SourceExprCommand, MixJSON } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("search"); diff --git a/src/nix/shell.cc b/src/nix/shell.cc index 2ccad930f..f42947b7c 100644 --- a/src/nix/shell.cc +++ b/src/nix/shell.cc @@ -177,12 +177,6 @@ struct Common : InstallableCommand struct CmdDevShell : Common { - - std::string name() override - { - return "dev-shell"; - } - std::string description() override { return "run a bash shell that provides the build environment of a derivation"; @@ -240,12 +234,6 @@ struct CmdDevShell : Common struct CmdPrintDevEnv : Common { - - std::string name() override - { - return "print-dev-env"; - } - std::string description() override { return "print shell code that can be sourced by bash to 
reproduce the build environment of a derivation"; @@ -279,5 +267,5 @@ struct CmdPrintDevEnv : Common } }; -static RegisterCommand r1(make_ref()); -static RegisterCommand r2(make_ref()); +static auto r1 = registerCommand("print-dev-env"); +static auto r2 = registerCommand("dev-shell"); diff --git a/src/nix/show-config.cc b/src/nix/show-config.cc index 86638b50d..87544f937 100644 --- a/src/nix/show-config.cc +++ b/src/nix/show-config.cc @@ -8,15 +8,6 @@ using namespace nix; struct CmdShowConfig : Command, MixJSON { - CmdShowConfig() - { - } - - std::string name() override - { - return "show-config"; - } - std::string description() override { return "show the Nix configuration"; @@ -37,4 +28,4 @@ struct CmdShowConfig : Command, MixJSON } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("show-config"); diff --git a/src/nix/show-derivation.cc b/src/nix/show-derivation.cc index ee94fded3..6065adc4d 100644 --- a/src/nix/show-derivation.cc +++ b/src/nix/show-derivation.cc @@ -22,11 +22,6 @@ struct CmdShowDerivation : InstallablesCommand .set(&recursive, true); } - std::string name() override - { - return "show-derivation"; - } - std::string description() override { return "show the contents of a store derivation"; @@ -116,4 +111,4 @@ struct CmdShowDerivation : InstallablesCommand } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("show-derivation"); diff --git a/src/nix/sigs.cc b/src/nix/sigs.cc index b1825c412..23bc83ad0 100644 --- a/src/nix/sigs.cc +++ b/src/nix/sigs.cc @@ -22,11 +22,6 @@ struct CmdCopySigs : StorePathsCommand .handler([&](std::vector ss) { substituterUris.push_back(ss[0]); }); } - std::string name() override - { - return "copy-sigs"; - } - std::string description() override { return "copy path signatures from substituters (like binary caches)"; @@ -93,7 +88,7 @@ struct CmdCopySigs : StorePathsCommand } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("copy-sigs"); struct CmdSignPaths : StorePathsCommand { @@ -109,11 +104,6 @@ struct CmdSignPaths : StorePathsCommand .dest(&secretKeyFile); } - std::string name() override - { - return "sign-paths"; - } - std::string description() override { return "sign the specified paths"; @@ -146,4 +136,4 @@ struct CmdSignPaths : StorePathsCommand } }; -static RegisterCommand r3(make_ref()); +static auto r2 = registerCommand("sign-paths"); diff --git a/src/nix/upgrade-nix.cc b/src/nix/upgrade-nix.cc index 35c44a70c..13d8504a6 100644 --- a/src/nix/upgrade-nix.cc +++ b/src/nix/upgrade-nix.cc @@ -30,11 +30,6 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand .dest(&storePathsUrl); } - std::string name() override - { - return "upgrade-nix"; - } - std::string description() override { return "upgrade Nix to the latest stable version"; @@ -157,4 +152,4 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("upgrade-nix"); diff --git a/src/nix/verify.cc b/src/nix/verify.cc index 7ef571561..f55766eda 100644 --- a/src/nix/verify.cc +++ b/src/nix/verify.cc @@ -29,11 +29,6 @@ struct CmdVerify : StorePathsCommand mkIntFlag('n', "sigs-needed", "require that each path has at least N valid signatures", &sigsNeeded); } - std::string name() override - { - return "verify"; - } - std::string description() override { return "verify the integrity of store paths"; @@ -175,4 +170,4 @@ struct CmdVerify : StorePathsCommand } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("verify"); diff 
--git a/src/nix/why-depends.cc b/src/nix/why-depends.cc index 32ba5a1ad..3d13a77e4 100644 --- a/src/nix/why-depends.cc +++ b/src/nix/why-depends.cc @@ -44,11 +44,6 @@ struct CmdWhyDepends : SourceExprCommand .set(&all, true); } - std::string name() override - { - return "why-depends"; - } - std::string description() override { return "show why a package has another package in its closure"; @@ -264,4 +259,4 @@ struct CmdWhyDepends : SourceExprCommand } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("why-depends"); From 59714a15e0b36c93a7c19b71344a9fccf7b9840d Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Tue, 11 Jun 2019 13:09:06 +0200 Subject: [PATCH 219/634] FuzzyMatching works Fixes #2843 --- src/libexpr/flake/flake.cc | 1 + tests/flakes.sh | 56 +++++++++++++++++++++++++++++++++++++- 2 files changed, 56 insertions(+), 1 deletion(-) diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index 0018a0d07..30ab9a876 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -104,6 +104,7 @@ static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const auto newRef = j->second; newRef.ref = flakeRef.ref; newRef.rev = flakeRef.rev; + newRef.subdir = flakeRef.subdir; return updateFlakeRef(state, newRef, registries, pastSearches); } } diff --git a/tests/flakes.sh b/tests/flakes.sh index c380b405b..85ba7c8c9 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -15,6 +15,7 @@ registry=$TEST_ROOT/registry.json flake1Dir=$TEST_ROOT/flake1 flake2Dir=$TEST_ROOT/flake2 flake3Dir=$TEST_ROOT/flake3 +flake4Dir=$TEST_ROOT/flake4 nonFlakeDir=$TEST_ROOT/nonFlake for repo in $flake1Dir $flake2Dir $flake3Dir $nonFlakeDir; do @@ -101,6 +102,12 @@ cat > $registry < $registry < $flake3Dir/flake.nix < \$out + ''; + }; + }; +} +EOF +git -C $flake3Dir add flake.nix +git -C $flake3Dir commit -m 'Remove packages.xyzzy' +git -C $flake3Dir checkout master + +# Test whether fuzzy-matching works for IsAlias +(! nix build -o $TEST_ROOT/result --flake-registry $registry flake4/removeXyzzy:xyzzy) + +# Test whether fuzzy-matching works for IsGit +(! 
nix build -o $TEST_ROOT/result --flake-registry $registry flake4/removeXyzzy:xyzzy) +nix build -o $TEST_ROOT/result --flake-registry $registry flake4/removeXyzzy:sth From e3552f2bcf246ea8f0dae0c076f1f67e86953746 Mon Sep 17 00:00:00 2001 From: Nick Van den Broeck Date: Wed, 19 Jun 2019 14:48:40 +0200 Subject: [PATCH 220/634] Added tests for the `nix flake` CLI --- tests/flakes.sh | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/tests/flakes.sh b/tests/flakes.sh index 85ba7c8c9..0571c2a62 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -16,9 +16,10 @@ flake1Dir=$TEST_ROOT/flake1 flake2Dir=$TEST_ROOT/flake2 flake3Dir=$TEST_ROOT/flake3 flake4Dir=$TEST_ROOT/flake4 +flake7Dir=$TEST_ROOT/flake7 nonFlakeDir=$TEST_ROOT/nonFlake -for repo in $flake1Dir $flake2Dir $flake3Dir $nonFlakeDir; do +for repo in $flake1Dir $flake2Dir $flake3Dir $flake7Dir $nonFlakeDir; do rm -rf $repo $repo.tmp mkdir $repo git -C $repo init @@ -76,6 +77,10 @@ cat > $flake3Dir/flake.nix < Date: Wed, 19 Jun 2019 23:37:40 +0200 Subject: [PATCH 221/634] Initialize Command::_name --- src/libutil/args.cc | 2 ++ src/libutil/args.hh | 3 +++ 2 files changed, 5 insertions(+) diff --git a/src/libutil/args.cc b/src/libutil/args.cc index 217495c26..ba15ea571 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -224,6 +224,7 @@ MultiCommand::MultiCommand(const Commands & commands) if (i == commands.end()) throw UsageError("'%s' is not a recognised command", ss[0]); command = i->second(); + command->_name = ss[0]; }}); } @@ -246,6 +247,7 @@ void MultiCommand::printHelp(const string & programName, std::ostream & out) Table2 table; for (auto & i : commands) { auto command = i.second(); + command->_name = i.first; auto descr = command->description(); if (!descr.empty()) table.push_back(std::make_pair(command->name(), descr)); diff --git a/src/libutil/args.hh b/src/libutil/args.hh index 8497eaf71..a083c4ce8 100644 --- a/src/libutil/args.hh +++ b/src/libutil/args.hh @@ -195,7 +195,10 @@ struct Command : virtual Args private: std::string _name; + friend class MultiCommand; + public: + std::string name() { return _name; } virtual void prepare() { }; From 29ccb2e9697ee2184012dd13854e487928ae4441 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 21 Jun 2019 12:49:18 +0200 Subject: [PATCH 222/634] Fix 32-bit overflow with --no-net --no-net causes tarballTtl to be set to the largest 32-bit integer, which causes comparison like 'time + tarballTtl < other_time' to fail on 32-bit systems. So cast them to 64-bit first. https://hydra.nixos.org/build/95076624 --- src/libexpr/primops/fetchGit.cc | 2 +- src/libexpr/primops/fetchMercurial.cc | 13 +++++-------- src/libstore/download.cc | 2 +- 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index 10f6b6f72..f0f5b2a51 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -135,7 +135,7 @@ GitInfo exportGit(ref store, std::string uri, git fetch to update the local ref to the remote ref. 
*/ struct stat st; doFetch = stat(localRefFile.c_str(), &st) != 0 || - st.st_mtime + settings.tarballTtl <= now; + (uint64_t) st.st_mtime + settings.tarballTtl <= (uint64_t) now; } if (doFetch) { diff --git a/src/libexpr/primops/fetchMercurial.cc b/src/libexpr/primops/fetchMercurial.cc index 596047ce3..c791443c3 100644 --- a/src/libexpr/primops/fetchMercurial.cc +++ b/src/libexpr/primops/fetchMercurial.cc @@ -77,7 +77,7 @@ HgInfo exportMercurial(ref store, const std::string & uri, time_t now = time(0); struct stat st; if (stat(stampFile.c_str(), &st) != 0 || - st.st_mtime + settings.tarballTtl <= now) + (uint64_t) st.st_mtime + settings.tarballTtl <= (uint64_t) now) { /* Except that if this is a commit hash that we already have, we don't have to pull again. */ @@ -93,17 +93,14 @@ HgInfo exportMercurial(ref store, const std::string & uri, try { runProgram("hg", true, { "pull", "-R", cacheDir, "--", uri }); } - catch (ExecError & e){ + catch (ExecError & e) { string transJournal = cacheDir + "/.hg/store/journal"; /* hg throws "abandoned transaction" error only if this file exists */ - if (pathExists(transJournal)) - { + if (pathExists(transJournal)) { runProgram("hg", true, { "recover", "-R", cacheDir }); runProgram("hg", true, { "pull", "-R", cacheDir, "--", uri }); - } - else - { - throw ExecError(e.status, fmt("program hg '%1%' ", statusToString(e.status))); + } else { + throw ExecError(e.status, fmt("'hg pull' %s", statusToString(e.status))); } } } else { diff --git a/src/libstore/download.cc b/src/libstore/download.cc index 5c1705e2f..571c194ec 100644 --- a/src/libstore/download.cc +++ b/src/libstore/download.cc @@ -822,7 +822,7 @@ CachedDownloadResult Downloader::downloadCached( auto ss = tokenizeString>(readFile(dataFile), "\n"); if (ss.size() >= 3 && ss[0] == url) { time_t lastChecked; - if (string2Int(ss[2], lastChecked) && lastChecked + request.ttl >= time(0)) { + if (string2Int(ss[2], lastChecked) && (uint64_t) lastChecked + request.ttl >= (uint64_t) time(0)) { skip = true; result.effectiveUri = request.uri; result.etag = ss[1]; From 4f6a7c86218c748df2a8700eca8ba9edbedd9ca5 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 21 Jun 2019 15:17:31 +0200 Subject: [PATCH 223/634] shell.nix: Unset $PYTHONPATH It breaks gdb. --- shell.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/shell.nix b/shell.nix index dd2950b29..34abf05ea 100644 --- a/shell.nix +++ b/shell.nix @@ -23,5 +23,6 @@ with import ./release-common.nix { inherit pkgs; }; configureFlags+=" --prefix=$prefix" PKG_CONFIG_PATH=$prefix/lib/pkgconfig:$PKG_CONFIG_PATH PATH=$prefix/bin:$PATH + unset PYTHONPATH ''; } From d132d057a85aa1812c4133feed6c9b34ca70671d Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 21 Jun 2019 15:29:05 +0200 Subject: [PATCH 224/634] Handle store symlinks in flake directories E.g. 'nix path-info ./result' inside a flake directory now works again. 
--- src/libexpr/flake/flakeref.cc | 8 ++++++++ src/libstore/store-api.cc | 2 +- src/libstore/store-api.hh | 1 + src/nix/installables.cc | 22 +++++++++++----------- tests/flakes.sh | 4 ++++ 5 files changed, 25 insertions(+), 12 deletions(-) diff --git a/src/libexpr/flake/flakeref.cc b/src/libexpr/flake/flakeref.cc index 24af09124..7631cd53a 100644 --- a/src/libexpr/flake/flakeref.cc +++ b/src/libexpr/flake/flakeref.cc @@ -143,6 +143,14 @@ FlakeRef::FlakeRef(const std::string & uri_, bool allowRelative) IsPath d; if (allowRelative) { d.path = absPath(uri); + try { + if (!S_ISDIR(lstat(d.path).st_mode)) + throw BadFlakeRef("path '%s' is not a flake (sub)directory"); + } catch (SysError & e) { + if (e.errNo == ENOENT || e.errNo == EISDIR) + throw BadFlakeRef("flake '%s' does not exist"); + throw; + } while (true) { if (pathExists(d.path + "/.git")) break; subdir = baseNameOf(d.path) + (subdir.empty() ? "" : "/" + subdir); diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index 8fabeeea4..c5a771030 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -55,7 +55,7 @@ Path Store::followLinksToStore(const Path & _path) const path = absPath(target, dirOf(path)); } if (!isInStore(path)) - throw Error(format("path '%1%' is not in the Nix store") % path); + throw NotInStore("path '%1%' is not in the Nix store", path); return path; } diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index 7a1b31d0f..558ea79af 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -26,6 +26,7 @@ MakeError(InvalidPath, Error) MakeError(Unsupported, Error) MakeError(SubstituteGone, Error) MakeError(SubstituterDisabled, Error) +MakeError(NotInStore, Error) struct BasicDerivation; diff --git a/src/nix/installables.cc b/src/nix/installables.cc index feaf57f0c..d43f86c0c 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -394,9 +394,18 @@ std::vector> SourceExprCommand::parseInstallables( } else { + auto follow = [&](const std::string & s) -> std::optional { + try { + return store->followLinksToStorePath(s); + } catch (NotInStore &) { + return {}; + } + }; + for (auto & s : ss) { size_t colon; + std::optional storePath; if (s.compare(0, 1, "(") == 0) result.push_back(std::make_shared(*this, s)); @@ -422,17 +431,8 @@ std::vector> SourceExprCommand::parseInstallables( getDefaultFlakeAttrPathPrefixes())); } - else if (s.find('/') != std::string::npos || s == ".") { - Path storePath; - try { - storePath = store->toStorePath(store->followLinksToStore(s)); - } catch (Error) { } - if (storePath != "") - result.push_back(std::make_shared(storePath)); - else - result.push_back(std::make_shared(*this, FlakeRef(s, true), - getDefaultFlakeAttrPaths())); - } + else if (s.find('/') != std::string::npos && (storePath = follow(s))) + result.push_back(std::make_shared(*storePath)); else throw Error("unsupported argument '%s'", s); diff --git a/tests/flakes.sh b/tests/flakes.sh index 0571c2a62..bfe00a674 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -149,6 +149,10 @@ nix build -o $TEST_ROOT/result --flake-registry $registry flake1 nix build -o $TEST_ROOT/result --flake-registry $registry $flake1Dir nix build -o $TEST_ROOT/result --flake-registry $registry file://$flake1Dir +# CHeck that store symlinks inside a flake are not interpreted as flakes. +nix build -o $flake1Dir/result --flake-registry $registry file://$flake1Dir +nix path-info $flake1Dir/result + # Building a flake with an unlocked dependency should fail in pure mode. (! 
nix eval "(builtins.getFlake "$flake2Dir")") From aa2846198f46c1260a91a6bf21a2f53997f1f274 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 21 Jun 2019 18:34:43 +0200 Subject: [PATCH 225/634] Don't update the global registry when building a locked flake It's unnecessary and slows things down (e.g. when you're on a Thalys with super-crappy Internet). --- src/libexpr/flake/flake.cc | 4 +++- tests/flakes.sh | 5 ++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index 215eb85b6..e9db9d80e 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -119,7 +119,9 @@ static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const static SourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowed = false) { FlakeRef resolvedRef = lookupFlake(state, flakeRef, - impureIsAllowed ? state.getFlakeRegistries() : std::vector>()); + impureIsAllowed && !flakeRef.isDirect() + ? state.getFlakeRegistries() + : std::vector>()); if (evalSettings.pureEval && !impureIsAllowed && !resolvedRef.isImmutable()) throw Error("requested to fetch mutable flake '%s' in pure mode", resolvedRef); diff --git a/tests/flakes.sh b/tests/flakes.sh index bfe00a674..ccab84612 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -168,6 +168,9 @@ git -C $flake2Dir commit flake.lock -m 'Add flake.lock' nix build -o $TEST_ROOT/result --flake-registry $registry $flake2Dir:bar [[ -z $(git -C $flake2Dir diff master) ]] +# Building with a lockfile should not require a fetch of the registry. +nix build -o $TEST_ROOT/result --flake-registry file:///no-registry.json $flake2Dir:bar --tarball-ttl 0 + # Updating the flake should not change the lockfile. nix flake update --flake-registry $registry $flake2Dir [[ -z $(git -C $flake2Dir diff master) ]] @@ -297,7 +300,7 @@ nix build -o $TEST_ROOT/result --flake-registry $registry flake3:xyzzy flake3:fn # Test doing multiple `lookupFlake`s nix build -o $TEST_ROOT/result --flake-registry $registry flake4:xyzzy -nix build -o $TEST_ROOT/result --flake-registry $registry file://$flake4Dir:xyzzy +#nix build -o $TEST_ROOT/result --flake-registry $registry file://$flake4Dir:xyzzy # Make branch "removeXyzzy" where flake3 doesn't have xyzzy anymore git -C $flake3Dir checkout -b removeXyzzy From d4fe9daed6f48ebdcea18a1952cbecd30a846e70 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 21 Jun 2019 19:04:58 +0200 Subject: [PATCH 226/634] Simplify getFlake() / fetchFlake() logic --- src/libexpr/flake/flake.cc | 50 ++++++++++++++++++++++---------------- src/libexpr/flake/flake.hh | 9 ++++++- src/nix/flake.cc | 7 +++--- tests/flakes.sh | 14 ++++------- 4 files changed, 46 insertions(+), 34 deletions(-) diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index e9db9d80e..302549a3c 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -89,9 +89,6 @@ FlakeRef updateFlakeRef(EvalState & state, const FlakeRef & newRef, const Regist static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const Registries & registries, std::vector pastSearches) { - if (registries.empty() && !flakeRef.isDirect()) - throw Error("indirect flake reference '%s' is not allowed", flakeRef); - for (std::shared_ptr registry : registries) { auto i = registry->entries.find(flakeRef); if (i != registry->entries.end()) { @@ -115,16 +112,24 @@ static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const return flakeRef; } -// Lookups 
happen here too -static SourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowed = false) +FlakeRef maybeLookupFlake( + EvalState & state, + const FlakeRef & flakeRef, + bool allowLookup) { - FlakeRef resolvedRef = lookupFlake(state, flakeRef, - impureIsAllowed && !flakeRef.isDirect() - ? state.getFlakeRegistries() - : std::vector>()); + if (!flakeRef.isDirect()) { + if (allowLookup) + return lookupFlake(state, flakeRef, state.getFlakeRegistries()); + else + throw Error("'%s' is an indirect flake reference, but registry lookups are not allowed", flakeRef); + } else + return flakeRef; +} - if (evalSettings.pureEval && !impureIsAllowed && !resolvedRef.isImmutable()) - throw Error("requested to fetch mutable flake '%s' in pure mode", resolvedRef); + +static SourceInfo fetchFlake(EvalState & state, const FlakeRef & resolvedRef) +{ + assert(resolvedRef.isDirect()); auto doGit = [&](const GitInfo & gitInfo) { FlakeRef ref(resolvedRef.baseRef()); @@ -190,10 +195,9 @@ static SourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, bool else abort(); } -// This will return the flake which corresponds to a given FlakeRef. The lookupFlake is done within `fetchFlake`, which is used here. -Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowed = false) +Flake getFlake(EvalState & state, const FlakeRef & flakeRef) { - SourceInfo sourceInfo = fetchFlake(state, flakeRef, impureIsAllowed); + SourceInfo sourceInfo = fetchFlake(state, flakeRef); debug("got flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string()); FlakeRef resolvedRef = sourceInfo.resolvedRef; @@ -278,10 +282,9 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe return flake; } -// Get the `NonFlake` corresponding to a `FlakeRef`. -NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowed = false) +NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef) { - auto sourceInfo = fetchFlake(state, flakeRef, impureIsAllowed); + auto sourceInfo = fetchFlake(state, flakeRef); debug("got non-flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string()); FlakeRef resolvedRef = sourceInfo.resolvedRef; @@ -347,7 +350,7 @@ static std::pair updateLocks( } else { if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries) throw Error("cannot update non-flake dependency '%s' in pure mode", id); - auto nonFlake = getNonFlake(state, ref, allowedToUseRegistries(handleLockFile, false)); + auto nonFlake = getNonFlake(state, maybeLookupFlake(state, ref, allowedToUseRegistries(handleLockFile, false))); newEntry.nonFlakeInputs.insert_or_assign(id, NonFlakeInput( nonFlake.sourceInfo.resolvedRef, @@ -364,7 +367,7 @@ static std::pair updateLocks( throw Error("cannot update flake dependency '%s' in pure mode", inputRef); newEntry.flakeInputs.insert_or_assign(inputRef, updateLocks(state, - getFlake(state, inputRef, allowedToUseRegistries(handleLockFile, false)), + getFlake(state, maybeLookupFlake(state, inputRef, allowedToUseRegistries(handleLockFile, false))), handleLockFile, {}, false).second); } } @@ -376,7 +379,7 @@ static std::pair updateLocks( and optionally write it to file, it the flake is writable. 
*/ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLockFile handleLockFile) { - auto flake = getFlake(state, topRef, allowedToUseRegistries(handleLockFile, true)); + auto flake = getFlake(state, maybeLookupFlake(state, topRef, allowedToUseRegistries(handleLockFile, true))); LockFile oldLockFile; @@ -441,7 +444,10 @@ static void emitSourceInfoAttrs(EvalState & state, const SourceInfo & sourceInfo static void prim_callFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) { auto lazyFlake = (FlakeInput *) args[0]->attrs; - auto flake = getFlake(state, lazyFlake->ref, false); + + assert(lazyFlake->ref.isImmutable()); + + auto flake = getFlake(state, lazyFlake->ref); if (flake.sourceInfo.narHash != lazyFlake->narHash) throw Error("the content hash of flake '%s' doesn't match the hash recorded in the referring lockfile", flake.sourceInfo.resolvedRef); @@ -453,6 +459,8 @@ static void prim_callNonFlake(EvalState & state, const Pos & pos, Value * * args { auto lazyNonFlake = (NonFlakeInput *) args[0]->attrs; + assert(lazyNonFlake->ref.isImmutable()); + auto nonFlake = getNonFlake(state, lazyNonFlake->ref); if (nonFlake.sourceInfo.narHash != lazyNonFlake->narHash) diff --git a/src/libexpr/flake/flake.hh b/src/libexpr/flake/flake.hh index 81b6541f0..de0feb2c4 100644 --- a/src/libexpr/flake/flake.hh +++ b/src/libexpr/flake/flake.hh @@ -81,7 +81,14 @@ struct NonFlake : originalRef(origRef), sourceInfo(sourceInfo) {}; }; -Flake getFlake(EvalState &, const FlakeRef &, bool impureIsAllowed); +Flake getFlake(EvalState &, const FlakeRef &); + +/* If 'allowLookup' is true, then resolve 'flakeRef' using the + registries. */ +FlakeRef maybeLookupFlake( + EvalState & state, + const FlakeRef & flakeRef, + bool allowLookup); /* Fingerprint of a locked flake; used as a cache key. 
*/ typedef Hash Fingerprint; diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 91c6b4276..49f7c33c7 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -38,7 +38,8 @@ public: Flake getFlake() { auto evalState = getEvalState(); - return flake::getFlake(*evalState, getFlakeRef(), useRegistries); + return flake::getFlake(*evalState, + maybeLookupFlake(*evalState, getFlakeRef(), useRegistries)); } ResolvedFlake resolveFlake() @@ -425,13 +426,13 @@ struct CmdFlakePin : virtual Args, EvalCommand FlakeRegistry userRegistry = *readRegistry(userRegistryPath); auto it = userRegistry.entries.find(FlakeRef(alias)); if (it != userRegistry.entries.end()) { - it->second = getFlake(*evalState, it->second, true).sourceInfo.resolvedRef; + it->second = getFlake(*evalState, maybeLookupFlake(*evalState, it->second, true)).sourceInfo.resolvedRef; writeRegistry(userRegistry, userRegistryPath); } else { std::shared_ptr globalReg = evalState->getGlobalFlakeRegistry(); it = globalReg->entries.find(FlakeRef(alias)); if (it != globalReg->entries.end()) { - auto newRef = getFlake(*evalState, it->second, true).sourceInfo.resolvedRef; + auto newRef = getFlake(*evalState, maybeLookupFlake(*evalState, it->second, true)).sourceInfo.resolvedRef; userRegistry.entries.insert_or_assign(alias, newRef); writeRegistry(userRegistry, userRegistryPath); } else diff --git a/tests/flakes.sh b/tests/flakes.sh index ccab84612..d2b168712 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -107,9 +107,6 @@ cat > $registry < $registry < Date: Sun, 23 Jun 2019 22:19:14 +0200 Subject: [PATCH 227/634] nix doctor: Fix typo --- src/nix/doctor.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nix/doctor.cc b/src/nix/doctor.cc index f2cf04758..98260127b 100644 --- a/src/nix/doctor.cc +++ b/src/nix/doctor.cc @@ -116,4 +116,4 @@ struct CmdDoctor : StoreCommand } }; -static auto r1 = registerCommand("doctore"); +static auto r1 = registerCommand("doctor"); From 15fa70cd1b853f5e62662b99ccb9ef3da6cfadff Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 24 Jun 2019 21:06:37 +0200 Subject: [PATCH 228/634] Downloader: Propagate exceptions from decompressionSink->finish() --- src/libstore/download.cc | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/libstore/download.cc b/src/libstore/download.cc index 571c194ec..70cfaf0dd 100644 --- a/src/libstore/download.cc +++ b/src/libstore/download.cc @@ -311,8 +311,13 @@ struct CurlDownloader : public Downloader debug("finished %s of '%s'; curl status = %d, HTTP status = %d, body = %d bytes", request.verb(), request.uri, code, httpStatus, result.bodySize); - if (decompressionSink) - decompressionSink->finish(); + if (decompressionSink) { + try { + decompressionSink->finish(); + } catch (...) { + writeException = std::current_exception(); + } + } if (code == CURLE_WRITE_ERROR && result.etag == request.expectedETag) { code = CURLE_OK; From a67cf5a3585c41dd9f219a2c7aa9cf67fa69520b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 24 Jun 2019 21:48:52 +0200 Subject: [PATCH 229/634] Fix 'error 9 while decompressing xz file' Once we've started writing data to a Sink, we can't restart a download request, because then we end up writing duplicate data to the Sink. Therefore we shouldn't handle retries in Downloader but at a higher level (in particular, in copyStorePath()). Fixes #2952. 
--- src/libstore/binary-cache-store.cc | 18 +++--- src/libstore/download.cc | 67 ++++---------------- src/libstore/download.hh | 11 +++- src/libstore/http-binary-cache-store.cc | 55 +++++++++++----- src/libstore/store-api.cc | 84 +++++++++++++------------ src/libutil/retry.hh | 38 +++++++++++ src/libutil/types.hh | 2 + 7 files changed, 156 insertions(+), 119 deletions(-) create mode 100644 src/libutil/retry.hh diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index 4527ee6ba..8b736056e 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -10,6 +10,8 @@ #include "nar-info-disk-cache.hh" #include "nar-accessor.hh" #include "json.hh" +#include "retry.hh" +#include "download.hh" #include @@ -79,13 +81,15 @@ void BinaryCacheStore::getFile(const std::string & path, Sink & sink) std::shared_ptr BinaryCacheStore::getFile(const std::string & path) { - StringSink sink; - try { - getFile(path, sink); - } catch (NoSuchBinaryCacheFile &) { - return nullptr; - } - return sink.s; + return retry>(downloadSettings.tries, [&]() -> std::shared_ptr { + StringSink sink; + try { + getFile(path, sink); + } catch (NoSuchBinaryCacheFile &) { + return nullptr; + } + return sink.s; + }); } Path BinaryCacheStore::narInfoFileFor(const Path & storePath) diff --git a/src/libstore/download.cc b/src/libstore/download.cc index 70cfaf0dd..cf79b2af5 100644 --- a/src/libstore/download.cc +++ b/src/libstore/download.cc @@ -8,6 +8,7 @@ #include "compression.hh" #include "pathlocks.hh" #include "finally.hh" +#include "retry.hh" #ifdef ENABLE_S3 #include @@ -19,11 +20,9 @@ #include #include -#include #include #include #include -#include #include using namespace std::string_literals; @@ -46,9 +45,6 @@ struct CurlDownloader : public Downloader { CURLM * curlm = 0; - std::random_device rd; - std::mt19937 mt19937; - struct DownloadItem : public std::enable_shared_from_this { CurlDownloader & downloader; @@ -61,12 +57,6 @@ struct CurlDownloader : public Downloader bool active = false; // whether the handle has been added to the multi object std::string status; - unsigned int attempt = 0; - - /* Don't start this download until the specified time point - has been reached. */ - std::chrono::steady_clock::time_point embargo; - struct curl_slist * requestHeaders = 0; std::string encoding; @@ -385,9 +375,7 @@ struct CurlDownloader : public Downloader } } - attempt++; - - auto exc = + fail( code == CURLE_ABORTED_BY_CALLBACK && _isInterrupted ? DownloadError(Interrupted, fmt("%s of '%s' was interrupted", request.verb(), request.uri)) : httpStatus != 0 @@ -398,31 +386,15 @@ struct CurlDownloader : public Downloader ) : DownloadError(err, fmt("unable to %s '%s': %s (%d)", - request.verb(), request.uri, curl_easy_strerror(code), code)); - - /* If this is a transient error, then maybe retry the - download after a while. 
*/ - if (err == Transient && attempt < request.tries) { - int ms = request.baseRetryTimeMs * std::pow(2.0f, attempt - 1 + std::uniform_real_distribution<>(0.0, 0.5)(downloader.mt19937)); - printError(format("warning: %s; retrying in %d ms") % exc.what() % ms); - embargo = std::chrono::steady_clock::now() + std::chrono::milliseconds(ms); - downloader.enqueueItem(shared_from_this()); - } - else - fail(exc); + request.verb(), request.uri, curl_easy_strerror(code), code))); } } }; struct State { - struct EmbargoComparator { - bool operator() (const std::shared_ptr & i1, const std::shared_ptr & i2) { - return i1->embargo > i2->embargo; - } - }; bool quit = false; - std::priority_queue, std::vector>, EmbargoComparator> incoming; + std::vector> incoming; }; Sync state_; @@ -435,7 +407,6 @@ struct CurlDownloader : public Downloader std::thread workerThread; CurlDownloader() - : mt19937(rd()) { static std::once_flag globalInit; std::call_once(globalInit, curl_global_init, CURL_GLOBAL_ALL); @@ -529,9 +500,7 @@ struct CurlDownloader : public Downloader nextWakeup = std::chrono::steady_clock::time_point(); - /* Add new curl requests from the incoming requests queue, - except for requests that are embargoed (waiting for a - retry timeout to expire). */ + /* Add new curl requests from the incoming requests queue. */ if (extraFDs[0].revents & CURL_WAIT_POLLIN) { char buf[1024]; auto res = read(extraFDs[0].fd, buf, sizeof(buf)); @@ -540,22 +509,9 @@ struct CurlDownloader : public Downloader } std::vector> incoming; - auto now = std::chrono::steady_clock::now(); - { auto state(state_.lock()); - while (!state->incoming.empty()) { - auto item = state->incoming.top(); - if (item->embargo <= now) { - incoming.push_back(item); - state->incoming.pop(); - } else { - if (nextWakeup == std::chrono::steady_clock::time_point() - || item->embargo < nextWakeup) - nextWakeup = item->embargo; - break; - } - } + std::swap(state->incoming, incoming); quit = state->quit; } @@ -582,7 +538,7 @@ struct CurlDownloader : public Downloader { auto state(state_.lock()); - while (!state->incoming.empty()) state->incoming.pop(); + state->incoming.clear(); state->quit = true; } } @@ -598,7 +554,7 @@ struct CurlDownloader : public Downloader auto state(state_.lock()); if (state->quit) throw nix::Error("cannot enqueue download request because the download thread is shutting down"); - state->incoming.push(item); + state->incoming.push_back(item); } writeFull(wakeupPipe.writeSide.get(), " "); } @@ -681,7 +637,9 @@ std::future Downloader::enqueueDownload(const DownloadRequest & DownloadResult Downloader::download(const DownloadRequest & request) { - return enqueueDownload(request).get(); + return retry(request.tries, [&]() { + return enqueueDownload(request).get(); + }); } void Downloader::download(DownloadRequest && request, Sink & sink) @@ -868,7 +826,7 @@ CachedDownloadResult Downloader::downloadCached( writeFile(dataFile, url + "\n" + res.etag + "\n" + std::to_string(time(0)) + "\n"); } catch (DownloadError & e) { if (storePath.empty()) throw; - printError(format("warning: %1%; using cached result") % e.msg()); + warn("%s; using cached result", e.msg()); result.etag = expectedETag; } } @@ -931,5 +889,4 @@ bool isUri(const string & s) return scheme == "http" || scheme == "https" || scheme == "file" || scheme == "channel" || scheme == "git" || scheme == "s3" || scheme == "ssh"; } - } diff --git a/src/libstore/download.hh b/src/libstore/download.hh index c095ad053..7548b83ae 100644 --- a/src/libstore/download.hh +++ 
b/src/libstore/download.hh @@ -99,11 +99,13 @@ struct Downloader std::future enqueueDownload(const DownloadRequest & request); - /* Synchronously download a file. */ + /* Synchronously download a file. The request will be retried in + case of transient failures. */ DownloadResult download(const DownloadRequest & request); /* Download a file, writing its data to a sink. The sink will be - invoked on the thread of the caller. */ + invoked on the thread of the caller. The request will not be + retried in case of transient failures. */ void download(DownloadRequest && request, Sink & sink); /* Check if the specified file is already in ~/.cache/nix/tarballs @@ -129,6 +131,11 @@ public: DownloadError(Downloader::Error error, const FormatOrString & fs) : Error(fs), error(error) { } + + bool isTransient() override + { + return error == Downloader::Error::Transient; + } }; bool isUri(const string & s); diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index 11c34fdac..ff2c10354 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -2,6 +2,7 @@ #include "download.hh" #include "globals.hh" #include "nar-info-disk-cache.hh" +#include "retry.hh" namespace nix { @@ -113,7 +114,6 @@ protected: DownloadRequest makeRequest(const std::string & path) { DownloadRequest request(cacheUri + "/" + path); - request.tries = 8; return request; } @@ -136,21 +136,46 @@ protected: { checkEnabled(); - auto request(makeRequest(path)); + struct State + { + DownloadRequest request; + std::function tryDownload; + unsigned int attempt = 0; + State(DownloadRequest && request) : request(request) {} + }; - getDownloader()->enqueueDownload(request, - {[callback, this](std::future result) { - try { - callback(result.get().data); - } catch (DownloadError & e) { - if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden) - return callback(std::shared_ptr()); - maybeDisable(); - callback.rethrow(); - } catch (...) { - callback.rethrow(); - } - }}); + auto state = std::make_shared(makeRequest(path)); + + state->tryDownload = [callback, state, this]() { + getDownloader()->enqueueDownload(state->request, + {[callback, state, this](std::future result) { + try { + callback(result.get().data); + } catch (DownloadError & e) { + if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden) + return callback(std::shared_ptr()); + ++state->attempt; + if (state->attempt < state->request.tries && e.isTransient()) { + auto ms = retrySleepTime(state->attempt); + warn("%s; retrying in %d ms", e.what(), ms); + /* We can't sleep here because that would + block the download thread. So use a + separate thread for sleeping. */ + std::thread([state, ms]() { + std::this_thread::sleep_for(std::chrono::milliseconds(ms)); + state->tryDownload(); + }).detach(); + } else { + maybeDisable(); + callback.rethrow(); + } + } catch (...) 
{ + callback.rethrow(); + } + }}); + }; + + state->tryDownload(); } }; diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index c5a771030..f577799da 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -6,10 +6,11 @@ #include "thread-pool.hh" #include "json.hh" #include "derivations.hh" +#include "retry.hh" +#include "download.hh" #include - namespace nix { @@ -572,54 +573,57 @@ void Store::buildPaths(const PathSet & paths, BuildMode buildMode) void copyStorePath(ref srcStore, ref dstStore, const Path & storePath, RepairFlag repair, CheckSigsFlag checkSigs) { - auto srcUri = srcStore->getUri(); - auto dstUri = dstStore->getUri(); + retry(downloadSettings.tries, [&]() { - Activity act(*logger, lvlInfo, actCopyPath, - srcUri == "local" || srcUri == "daemon" - ? fmt("copying path '%s' to '%s'", storePath, dstUri) - : dstUri == "local" || dstUri == "daemon" - ? fmt("copying path '%s' from '%s'", storePath, srcUri) - : fmt("copying path '%s' from '%s' to '%s'", storePath, srcUri, dstUri), - {storePath, srcUri, dstUri}); - PushActivity pact(act.id); + auto srcUri = srcStore->getUri(); + auto dstUri = dstStore->getUri(); - auto info = srcStore->queryPathInfo(storePath); + Activity act(*logger, lvlInfo, actCopyPath, + srcUri == "local" || srcUri == "daemon" + ? fmt("copying path '%s' to '%s'", storePath, dstUri) + : dstUri == "local" || dstUri == "daemon" + ? fmt("copying path '%s' from '%s'", storePath, srcUri) + : fmt("copying path '%s' from '%s' to '%s'", storePath, srcUri, dstUri), + {storePath, srcUri, dstUri}); + PushActivity pact(act.id); - uint64_t total = 0; + auto info = srcStore->queryPathInfo(storePath); - if (!info->narHash) { - StringSink sink; - srcStore->narFromPath({storePath}, sink); - auto info2 = make_ref(*info); - info2->narHash = hashString(htSHA256, *sink.s); - if (!info->narSize) info2->narSize = sink.s->size(); - if (info->ultimate) info2->ultimate = false; - info = info2; + uint64_t total = 0; - StringSource source(*sink.s); - dstStore->addToStore(*info, source, repair, checkSigs); - return; - } + if (!info->narHash) { + StringSink sink; + srcStore->narFromPath({storePath}, sink); + auto info2 = make_ref(*info); + info2->narHash = hashString(htSHA256, *sink.s); + if (!info->narSize) info2->narSize = sink.s->size(); + if (info->ultimate) info2->ultimate = false; + info = info2; - if (info->ultimate) { - auto info2 = make_ref(*info); - info2->ultimate = false; - info = info2; - } + StringSource source(*sink.s); + dstStore->addToStore(*info, source, repair, checkSigs); + return; + } - auto source = sinkToSource([&](Sink & sink) { - LambdaSink wrapperSink([&](const unsigned char * data, size_t len) { - sink(data, len); - total += len; - act.progress(total, info->narSize); + if (info->ultimate) { + auto info2 = make_ref(*info); + info2->ultimate = false; + info = info2; + } + + auto source = sinkToSource([&](Sink & sink) { + LambdaSink wrapperSink([&](const unsigned char * data, size_t len) { + sink(data, len); + total += len; + act.progress(total, info->narSize); + }); + srcStore->narFromPath({storePath}, wrapperSink); + }, [&]() { + throw EndOfFile("NAR for '%s' fetched from '%s' is incomplete", storePath, srcStore->getUri()); }); - srcStore->narFromPath({storePath}, wrapperSink); - }, [&]() { - throw EndOfFile("NAR for '%s' fetched from '%s' is incomplete", storePath, srcStore->getUri()); - }); - dstStore->addToStore(*info, *source, repair, checkSigs); + dstStore->addToStore(*info, *source, repair, checkSigs); + }); } diff --git 
a/src/libutil/retry.hh b/src/libutil/retry.hh new file mode 100644 index 000000000..b45cb37f7 --- /dev/null +++ b/src/libutil/retry.hh @@ -0,0 +1,38 @@ +#pragma once + +#include "logging.hh" + +#include +#include +#include +#include + +namespace nix { + +inline unsigned int retrySleepTime(unsigned int attempt) +{ + std::random_device rd; + std::mt19937 mt19937; + return 250.0 * std::pow(2.0f, + attempt - 1 + std::uniform_real_distribution<>(0.0, 0.5)(mt19937)); +} + +template +C retry(unsigned int attempts, std::function && f) +{ + unsigned int attempt = 0; + while (true) { + try { + return f(); + } catch (BaseError & e) { + ++attempt; + if (attempt >= attempts || !e.isTransient()) + throw; + auto ms = retrySleepTime(attempt); + warn("%s; retrying in %d ms", e.what(), ms); + std::this_thread::sleep_for(std::chrono::milliseconds(ms)); + } + } +} + +} diff --git a/src/libutil/types.hh b/src/libutil/types.hh index 92bf469b5..88e3243f4 100644 --- a/src/libutil/types.hh +++ b/src/libutil/types.hh @@ -109,6 +109,8 @@ public: const string & msg() const { return err; } const string & prefix() const { return prefix_; } BaseError & addPrefix(const FormatOrString & fs); + + virtual bool isTransient() { return false; } }; #define MakeError(newClass, superClass) \ From b0c220c02ec584af282b9c7f493e4a4d2e429f8c Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 10 Jul 2019 10:27:33 +0200 Subject: [PATCH 230/634] Check for epochs < 201906 --- src/libexpr/flake/flake.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index 302549a3c..4f59c61bd 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -227,6 +227,8 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) if (auto epoch = vInfo.attrs->get(sEpoch)) { flake.epoch = state.forceInt(*(**epoch).value, *(**epoch).pos); + if (flake.epoch < 201906) + throw Error("flake '%s' has illegal epoch %d", flakeRef, flake.epoch); if (flake.epoch > 201906) throw Error("flake '%s' requires unsupported epoch %d; please upgrade Nix", flakeRef, flake.epoch); } else From ad42a784690449873fccb20192bd2150da81c56d Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 11 Jul 2019 13:54:53 +0200 Subject: [PATCH 231/634] Rename 'epoch' -> 'edition' --- doc/flakes/design.md | 10 +++++----- src/libexpr/flake/flake.cc | 24 +++++++++++++++--------- src/libexpr/flake/flake.hh | 2 +- src/nix/flake.cc | 4 ++-- 4 files changed, 23 insertions(+), 17 deletions(-) diff --git a/doc/flakes/design.md b/doc/flakes/design.md index c9520bcbf..97bd12ce3 100644 --- a/doc/flakes/design.md +++ b/doc/flakes/design.md @@ -65,7 +65,7 @@ Upcoming but not yet implemented: NixOS configuration can be reproduced unambiguously from the top-level flake. * Nix code can query flake metadata such as `commitHash` (the Git revision) or - `epoch` (the date of the last commit). This is useful for NixOS to compute + `edition` (the date of the last commit). This is useful for NixOS to compute the NixOS version string (which will be the revision of the top-level configuration flake, uniquely identifying the configuration). @@ -85,9 +85,9 @@ repository that provides a single package and a single NixOS module. # The flake identifier. name = "dwarffs"; - # The epoch may be used in the future to determine how Nix + # The edition may be used in the future to determine how Nix # expressions inside this flake are to be parsed. - epoch = 201906; + edition = 201906; # Some other metadata. 
description = "A filesystem that fetches DWARF debug info from the Internet on demand"; @@ -162,7 +162,7 @@ Similarly, a minimal `flake.nix` for Nixpkgs: { name = "nixpkgs"; - epoch = 201906; + edition = 201906; description = "A collection of packages for the Nix package manager"; @@ -449,7 +449,7 @@ flakes in (local) Git repositories. { name = "my-system"; - epoch = 201906; + edition = 201906; inputs = [ "nixpkgs/nixos-18.09" diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index 4f59c61bd..8b9525680 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -223,16 +223,21 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) state.forceAttrs(vInfo); - auto sEpoch = state.symbols.create("epoch"); + auto sEdition = state.symbols.create("edition"); + auto sEpoch = state.symbols.create("epoch"); // FIXME: remove soon - if (auto epoch = vInfo.attrs->get(sEpoch)) { - flake.epoch = state.forceInt(*(**epoch).value, *(**epoch).pos); - if (flake.epoch < 201906) - throw Error("flake '%s' has illegal epoch %d", flakeRef, flake.epoch); - if (flake.epoch > 201906) - throw Error("flake '%s' requires unsupported epoch %d; please upgrade Nix", flakeRef, flake.epoch); + auto edition = vInfo.attrs->get(sEdition); + if (!edition) + edition = vInfo.attrs->get(sEpoch); + + if (edition) { + flake.edition = state.forceInt(*(**edition).value, *(**edition).pos); + if (flake.edition < 201906) + throw Error("flake '%s' has illegal edition %d", flakeRef, flake.edition); + if (flake.edition > 201906) + throw Error("flake '%s' requires unsupported edition %d; please upgrade Nix", flakeRef, flake.edition); } else - throw Error("flake '%s' lacks attribute 'epoch'", flakeRef); + throw Error("flake '%s' lacks attribute 'edition'", flakeRef); if (auto name = vInfo.attrs->get(state.sName)) flake.id = state.forceStringNoCtx(*(**name).value, *(**name).pos); @@ -271,7 +276,8 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) throw Error("flake '%s' lacks attribute 'outputs'", flakeRef); for (auto & attr : *vInfo.attrs) { - if (attr.name != sEpoch && + if (attr.name != sEdition && + attr.name != sEpoch && attr.name != state.sName && attr.name != state.sDescription && attr.name != sInputs && diff --git a/src/libexpr/flake/flake.hh b/src/libexpr/flake/flake.hh index de0feb2c4..01fb421bd 100644 --- a/src/libexpr/flake/flake.hh +++ b/src/libexpr/flake/flake.hh @@ -67,7 +67,7 @@ struct Flake std::vector inputs; std::map nonFlakeInputs; Value * vOutputs; // FIXME: gc - unsigned int epoch; + unsigned int edition; Flake(const FlakeRef & origRef, const SourceInfo & sourceInfo) : originalRef(origRef), sourceInfo(sourceInfo) {}; diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 49f7c33c7..aab29b626 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -105,7 +105,7 @@ static void printFlakeInfo(const Flake & flake) { std::cout << fmt("ID: %s\n", flake.id); std::cout << fmt("Description: %s\n", flake.description); - std::cout << fmt("Epoch: %s\n", flake.epoch); + std::cout << fmt("Edition: %s\n", flake.edition); printSourceInfo(flake.sourceInfo); } @@ -114,7 +114,7 @@ static nlohmann::json flakeToJson(const Flake & flake) nlohmann::json j; j["id"] = flake.id; j["description"] = flake.description; - j["epoch"] = flake.epoch; + j["edition"] = flake.edition; sourceInfoToJson(flake.sourceInfo, j); return j; } From 0802e006f27acadc32c43ba02313709dfc51f940 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 11 Jul 2019 17:05:53 +0200 Subject: [PATCH 232/634] Use "git add 
--force --intent-to-add" for flake.lock Fixes The following paths are ignored by one of your .gitignore files: flake.lock --- src/libexpr/flake/flake.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index 8b9525680..abbb9a3e1 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -409,6 +409,8 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLoc // Hack: Make sure that flake.lock is visible to Git, so it ends up in the Nix store. runProgram("git", true, { "-C", refData->path, "add", + "--force", + "--intent-to-add", (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock" }); } else warn("cannot write lockfile of remote flake '%s'", topRef); From bd62290c23fa15181e8af75c5055e104900f8532 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 12 Jul 2019 12:59:00 +0200 Subject: [PATCH 233/634] fetchGit: Warn about dirty trees --- src/libexpr/primops/fetchGit.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index 10cd2470a..940cf3022 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -46,6 +46,8 @@ GitInfo exportGit(ref store, std::string uri, /* This is an unclean working tree. So copy all tracked files. */ + warn("Git tree '%s' is dirty", uri); + GitInfo gitInfo; gitInfo.ref = "HEAD"; From b29cec76971ff1949415f76c82fa0ecf699ac264 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 12 Jul 2019 13:29:54 +0200 Subject: [PATCH 234/634] Don't write lock files if they have dirty inputs --- src/libexpr/flake/flake.cc | 25 +++++++++++++++---------- src/libexpr/flake/flakeref.hh | 6 ++++++ src/libexpr/flake/lockfile.cc | 11 +++++++++++ src/libexpr/flake/lockfile.hh | 4 ++++ 4 files changed, 36 insertions(+), 10 deletions(-) diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index abbb9a3e1..e6cef502c 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -405,17 +405,22 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLoc if (!(lockFile == oldLockFile)) { if (allowedToWrite(handleLockFile)) { if (auto refData = std::get_if(&topRef.data)) { - lockFile.write(refData->path + (topRef.subdir == "" ? "" : "/" + topRef.subdir) + "/flake.lock"); + if (lockFile.isDirty()) + warn("will not write lock file of flake '%s' because it has a dirty input", topRef); + else { + lockFile.write(refData->path + (topRef.subdir == "" ? "" : "/" + topRef.subdir) + "/flake.lock"); - // Hack: Make sure that flake.lock is visible to Git, so it ends up in the Nix store. - runProgram("git", true, { "-C", refData->path, "add", - "--force", - "--intent-to-add", - (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock" }); + // Hack: Make sure that flake.lock is visible to Git, so it ends up in the Nix store. + runProgram("git", true, + { "-C", refData->path, "add", + "--force", + "--intent-to-add", + (topRef.subdir == "" ? 
"" : topRef.subdir + "/") + "flake.lock" }); + } } else - warn("cannot write lockfile of remote flake '%s'", topRef); + warn("cannot write lock file of remote flake '%s'", topRef); } else if (handleLockFile != AllPure && handleLockFile != TopRefUsesRegistries) - warn("using updated lockfile without writing it to file"); + warn("using updated lock file without writing it to file"); } return ResolvedFlake(std::move(flake), std::move(lockFile)); @@ -624,8 +629,8 @@ const Registries EvalState::getFlakeRegistries() Fingerprint ResolvedFlake::getFingerprint() const { - // FIXME: as an optimization, if the flake contains a lockfile and - // we haven't changed it, then it's sufficient to use + // FIXME: as an optimization, if the flake contains a lock file + // and we haven't changed it, then it's sufficient to use // flake.sourceInfo.storePath for the fingerprint. return hashString(htSHA256, fmt("%s;%s", flake.sourceInfo.storePath, lockFile)); diff --git a/src/libexpr/flake/flakeref.hh b/src/libexpr/flake/flakeref.hh index 52bb82ddb..082dd8c26 100644 --- a/src/libexpr/flake/flakeref.hh +++ b/src/libexpr/flake/flakeref.hh @@ -176,6 +176,12 @@ struct FlakeRef bool isImmutable() const; FlakeRef baseRef() const; + + bool isDirty() const + { + return std::get_if(&data) + && rev == Hash(rev->type); + } }; std::ostream & operator << (std::ostream & str, const FlakeRef & flakeRef); diff --git a/src/libexpr/flake/lockfile.cc b/src/libexpr/flake/lockfile.cc index 97c748c66..15f2e2e8e 100644 --- a/src/libexpr/flake/lockfile.cc +++ b/src/libexpr/flake/lockfile.cc @@ -66,6 +66,17 @@ nlohmann::json FlakeInputs::toJson() const return json; } +bool FlakeInputs::isDirty() const +{ + for (auto & i : flakeInputs) + if (i.second.ref.isDirty() || i.second.isDirty()) return true; + + for (auto & i : nonFlakeInputs) + if (i.second.ref.isDirty()) return true; + + return false; +} + nlohmann::json LockFile::toJson() const { auto json = FlakeInputs::toJson(); diff --git a/src/libexpr/flake/lockfile.hh b/src/libexpr/flake/lockfile.hh index b76124190..7077db3cd 100644 --- a/src/libexpr/flake/lockfile.hh +++ b/src/libexpr/flake/lockfile.hh @@ -53,6 +53,10 @@ struct FlakeInputs FlakeInputs(const nlohmann::json & json); nlohmann::json toJson() const; + + /* A lock file is dirty if it contains a dirty flakeref + (i.e. reference to a dirty working tree). */ + bool isDirty() const; }; /* Lock file information about a flake input. */ From b45628a172dd3a3ba863cd231f1446c7cd901cf7 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 12 Jul 2019 14:37:45 +0200 Subject: [PATCH 235/634] Fix flake tests --- tests/flakes.sh | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/tests/flakes.sh b/tests/flakes.sh index d2b168712..406e5b632 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -31,7 +31,7 @@ cat > $flake1Dir/flake.nix < $flake2Dir/flake.nix < $flake3Dir/flake.nix < $flake3Dir/flake.nix <&1 | grep 'unsupported epoch' +nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir:sth 2>&1 | grep 'unsupported edition' # Test whether registry caching works. nix flake list --flake-registry file://$registry | grep -q flake3 @@ -242,7 +244,7 @@ cat > $flake3Dir/flake.nix < $flake3Dir/flake.nix < Date: Fri, 12 Jul 2019 15:32:17 +0200 Subject: [PATCH 236/634] nix build: Add '--profile' flag This replaces 'nix-env --set'. 
For example: $ nix build --profile /nix/var/nix/profiles/system \ ~/Misc/eelco-configurations:nixosConfigurations.vyr.config.system.build.toplevel updates the NixOS system profile from a flake. This could have been a separate command (e.g. 'nix set-profile') but 1) '--profile' is pretty similar to '--out-link'; and 2) '--profile' could be useful for other command (like 'nix dev-shell'). --- src/nix/build.cc | 17 +++++++++++------ src/nix/command.cc | 41 +++++++++++++++++++++++++++++++++++++++++ src/nix/command.hh | 14 ++++++++++++++ 3 files changed, 66 insertions(+), 6 deletions(-) diff --git a/src/nix/build.cc b/src/nix/build.cc index d8ce8cc80..f63150012 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -6,7 +6,7 @@ using namespace nix; -struct CmdBuild : MixDryRun, InstallablesCommand +struct CmdBuild : MixDryRun, MixProfile, InstallablesCommand { Path outLink = "result"; @@ -41,6 +41,10 @@ struct CmdBuild : MixDryRun, InstallablesCommand "To build the build.x86_64-linux attribute from release.nix:", "nix build -f release.nix build.x86_64-linux" }, + Example{ + "To make a profile point at GNU Hello:", + "nix build --profile /tmp/profile nixpkgs:hello" + }, }; } @@ -52,18 +56,19 @@ struct CmdBuild : MixDryRun, InstallablesCommand evalState->addRegistryOverrides(registryOverrides); if (dryRun) return; - for (size_t i = 0; i < buildables.size(); ++i) { - auto & b(buildables[i]); - - if (outLink != "") - for (auto & output : b.outputs) + if (outLink != "") { + for (size_t i = 0; i < buildables.size(); ++i) { + for (auto & output : buildables[i].outputs) if (auto store2 = store.dynamic_pointer_cast()) { std::string symlink = outLink; if (i) symlink += fmt("-%d", i); if (output.first != "out") symlink += fmt("-%s", output.first); store2->addPermRoot(output.second, absPath(symlink), true); } + } } + + updateProfile(buildables); } }; diff --git a/src/nix/command.cc b/src/nix/command.cc index 89fa0cba4..8191cb831 100644 --- a/src/nix/command.cc +++ b/src/nix/command.cc @@ -1,6 +1,7 @@ #include "command.hh" #include "store-api.hh" #include "derivations.hh" +#include "profiles.hh" namespace nix { @@ -81,4 +82,44 @@ void StorePathCommand::run(ref store) run(store, *storePaths.begin()); } +MixProfile::MixProfile() +{ + mkFlag() + .longName("profile") + .description("profile to update") + .labels({"path"}) + .dest(&profile); +} + +void MixProfile::updateProfile(const Path & storePath) +{ + if (!profile) return; + auto store = getStore().dynamic_pointer_cast(); + if (!store) throw Error("'--profile' is not supported for this Nix store"); + switchLink(*profile, + createGeneration( + ref(store), + *profile, storePath)); +} + +void MixProfile::updateProfile(const Buildables & buildables) +{ + if (!profile) return; + + std::optional result; + + for (auto & buildable : buildables) { + for (auto & output : buildable.outputs) { + if (result) + throw Error("'--profile' requires that the arguments produce a single store path, but there are multiple"); + result = output.second; + } + } + + if (!result) + throw Error("'--profile' requires that the arguments produce a single store path, but there are none"); + + updateProfile(*result); +} + } diff --git a/src/nix/command.hh b/src/nix/command.hh index 59c6f8578..d6153e42b 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -219,4 +219,18 @@ PathSet toDerivations(ref store, std::vector> installables, bool useDeriver = false); +struct MixProfile : virtual Args, virtual StoreCommand +{ + std::optional profile; + + MixProfile(); + + /* If 'profile' is 
set, make it point at 'storePath'. */ + void updateProfile(const Path & storePath); + + /* If 'profile' is set, make it point at the store path produced + by 'buildables'. */ + void updateProfile(const Buildables & buildables); +}; + } From 7ba928116ef1677b7403525df9e8abb49001820e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 12 Jul 2019 16:10:58 +0200 Subject: [PATCH 237/634] nix dev-shell: Add --profile flag This is useful to prevent the shell environment from being garbage-collected. --- src/nix/shell.cc | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/src/nix/shell.cc b/src/nix/shell.cc index f42947b7c..4c7b701dc 100644 --- a/src/nix/shell.cc +++ b/src/nix/shell.cc @@ -66,7 +66,7 @@ BuildEnvironment readEnvironment(const Path & path) modified derivation with the same dependencies and nearly the same initial environment variables, that just writes the resulting environment to a file and exits. */ -BuildEnvironment getDerivationEnvironment(ref store, Derivation drv) +Path getDerivationEnvironment(ref store, Derivation drv) { auto builder = baseNameOf(drv.builder); if (builder != "bash") @@ -101,7 +101,7 @@ BuildEnvironment getDerivationEnvironment(ref store, Derivation drv) assert(store->isValidPath(shellOutPath)); - return readEnvironment(shellOutPath); + return shellOutPath; } struct Common : InstallableCommand @@ -175,7 +175,7 @@ struct Common : InstallableCommand } }; -struct CmdDevShell : Common +struct CmdDevShell : Common, MixProfile { std::string description() override { @@ -193,6 +193,10 @@ struct CmdDevShell : Common "To get the build environment of the default package of flake in the current directory:", "nix dev-shell" }, + Example{ + "To store the build environment in a profile:", + "nix dev-shell --profile /tmp/my-shell" + }, }; } @@ -206,7 +210,11 @@ struct CmdDevShell : Common auto & drvPath = *drvs.begin(); - auto buildEnvironment = getDerivationEnvironment(store, store->derivationFromPath(drvPath)); + auto shellOutPath = getDerivationEnvironment(store, store->derivationFromPath(drvPath)); + + updateProfile(shellOutPath); + + auto buildEnvironment = readEnvironment(shellOutPath); auto [rcFileFd, rcFilePath] = createTempFile("nix-shell"); @@ -259,7 +267,9 @@ struct CmdPrintDevEnv : Common auto & drvPath = *drvs.begin(); - auto buildEnvironment = getDerivationEnvironment(store, store->derivationFromPath(drvPath)); + auto buildEnvironment = readEnvironment( + getDerivationEnvironment(store, + store->derivationFromPath(drvPath))); stopProgressBar(); From 731bc65ec04900834ca5e5b8e9dae1aa8c2c1027 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 12 Jul 2019 16:16:27 +0200 Subject: [PATCH 238/634] Refactor a bit --- src/nix/shell.cc | 47 +++++++++++++++++++++-------------------------- 1 file changed, 21 insertions(+), 26 deletions(-) diff --git a/src/nix/shell.cc b/src/nix/shell.cc index 4c7b701dc..442835d38 100644 --- a/src/nix/shell.cc +++ b/src/nix/shell.cc @@ -104,7 +104,7 @@ Path getDerivationEnvironment(ref store, Derivation drv) return shellOutPath; } -struct Common : InstallableCommand +struct Common : InstallableCommand, MixProfile { /* std::set keepVars{ @@ -173,9 +173,26 @@ struct Common : InstallableCommand { return {"devShell", "defaultPackage"}; } + + BuildEnvironment getBuildEnvironment(ref store) + { + auto drvs = toDerivations(store, {installable}); + + if (drvs.size() != 1) + throw Error("'%s' needs to evaluate to a single derivation, but it evaluated to %d derivations", + installable->what(), 
drvs.size()); + + auto & drvPath = *drvs.begin(); + + auto shellOutPath = getDerivationEnvironment(store, store->derivationFromPath(drvPath)); + + updateProfile(shellOutPath); + + return readEnvironment(shellOutPath); + } }; -struct CmdDevShell : Common, MixProfile +struct CmdDevShell : Common { std::string description() override { @@ -202,19 +219,7 @@ struct CmdDevShell : Common, MixProfile void run(ref store) override { - auto drvs = toDerivations(store, {installable}); - - if (drvs.size() != 1) - throw Error("'%s' needs to evaluate to a single derivation, but it evaluated to %d derivations", - installable->what(), drvs.size()); - - auto & drvPath = *drvs.begin(); - - auto shellOutPath = getDerivationEnvironment(store, store->derivationFromPath(drvPath)); - - updateProfile(shellOutPath); - - auto buildEnvironment = readEnvironment(shellOutPath); + auto buildEnvironment = getBuildEnvironment(store); auto [rcFileFd, rcFilePath] = createTempFile("nix-shell"); @@ -259,17 +264,7 @@ struct CmdPrintDevEnv : Common void run(ref store) override { - auto drvs = toDerivations(store, {installable}); - - if (drvs.size() != 1) - throw Error("'%s' needs to evaluate to a single derivation, but it evaluated to %d derivations", - installable->what(), drvs.size()); - - auto & drvPath = *drvs.begin(); - - auto buildEnvironment = readEnvironment( - getDerivationEnvironment(store, - store->derivationFromPath(drvPath))); + auto buildEnvironment = getBuildEnvironment(store); stopProgressBar(); From aa82f8b2d2a2c42f0d713e8404b668cef1a4b108 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 12 Jul 2019 16:28:39 +0200 Subject: [PATCH 239/634] nix dev-shell: Make it possible to enter a profile For example: $ nix dev-shell --profile /tmp/my-shell dwarffs (later) $ nix dev-shell /tmp/my-shell --- src/nix/command.hh | 7 +++++++ src/nix/installables.cc | 5 +++++ src/nix/shell.cc | 34 ++++++++++++++++++++++++---------- 3 files changed, 36 insertions(+), 10 deletions(-) diff --git a/src/nix/command.hh b/src/nix/command.hh index d6153e42b..00c202f20 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -64,6 +64,13 @@ struct Installable { throw Error("argument '%s' cannot be evaluated", what()); } + + /* Return a value only if this installable is a store path or a + symlink to it. */ + virtual std::optional getStorePath() + { + return {}; + } }; struct EvalCommand : virtual StoreCommand, MixEvalArgs diff --git a/src/nix/installables.cc b/src/nix/installables.cc index d43f86c0c..aa5ef5184 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -102,6 +102,11 @@ struct InstallableStorePath : Installable { return {{isDerivation(storePath) ? 
storePath : "", {{"out", storePath}}}}; } + + std::optional getStorePath() override + { + return storePath; + } }; struct InstallableValue : Installable diff --git a/src/nix/shell.cc b/src/nix/shell.cc index 442835d38..93732f6a3 100644 --- a/src/nix/shell.cc +++ b/src/nix/shell.cc @@ -174,17 +174,27 @@ struct Common : InstallableCommand, MixProfile return {"devShell", "defaultPackage"}; } + Path getShellOutPath(ref store) + { + auto path = installable->getStorePath(); + if (path && hasSuffix(*path, "-env")) + return *path; + else { + auto drvs = toDerivations(store, {installable}); + + if (drvs.size() != 1) + throw Error("'%s' needs to evaluate to a single derivation, but it evaluated to %d derivations", + installable->what(), drvs.size()); + + auto & drvPath = *drvs.begin(); + + return getDerivationEnvironment(store, store->derivationFromPath(drvPath)); + } + } + BuildEnvironment getBuildEnvironment(ref store) { - auto drvs = toDerivations(store, {installable}); - - if (drvs.size() != 1) - throw Error("'%s' needs to evaluate to a single derivation, but it evaluated to %d derivations", - installable->what(), drvs.size()); - - auto & drvPath = *drvs.begin(); - - auto shellOutPath = getDerivationEnvironment(store, store->derivationFromPath(drvPath)); + auto shellOutPath = getShellOutPath(store); updateProfile(shellOutPath); @@ -212,7 +222,11 @@ struct CmdDevShell : Common }, Example{ "To store the build environment in a profile:", - "nix dev-shell --profile /tmp/my-shell" + "nix dev-shell --profile /tmp/my-shell nixpkgs:hello" + }, + Example{ + "To use a build environment previously recorded in a profile:", + "nix dev-shell /tmp/my-shell" }, }; } From 13604318ad1a6ac97ef789481a550a9be65c1d48 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 26 Jul 2019 20:06:03 +0200 Subject: [PATCH 240/634] epoch -> edition --- flake.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index e7deb2de3..e7a85d730 100644 --- a/flake.nix +++ b/flake.nix @@ -3,7 +3,7 @@ description = "The purely functional package manager"; - epoch = 201906; + edition = 201906; inputs = [ "nixpkgs" ]; From 336afe4d5fe374569c2b13d2db90caac663573b3 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 26 Jul 2019 20:09:44 +0200 Subject: [PATCH 241/634] nix dev-shell: Set IN_NIX_SHELL in the derivation This ensures that stdenv / setup hooks take $IN_NIX_SHELL into account. For example, stdenv only sets NIX_SSL_CERT_FILE=/no-cert-file.crt if we're not in a shell. --- src/nix/shell.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/nix/shell.cc b/src/nix/shell.cc index 93732f6a3..5f2724961 100644 --- a/src/nix/shell.cc +++ b/src/nix/shell.cc @@ -72,7 +72,7 @@ Path getDerivationEnvironment(ref store, Derivation drv) if (builder != "bash") throw Error("'nix shell' only works on derivations that use 'bash' as their builder"); - drv.args = {"-c", "set -e; if [[ -n $stdenv ]]; then source $stdenv/setup; fi; set > $out"}; + drv.args = {"-c", "set -e; export IN_NIX_SHELL=impure; if [[ -n $stdenv ]]; then source $stdenv/setup; fi; set > $out"}; /* Remove derivation checks. 
*/ drv.env.erase("allowedReferences"); @@ -143,7 +143,6 @@ struct Common : InstallableCommand, MixProfile void makeRcScript(const BuildEnvironment & buildEnvironment, std::ostream & out) { - out << "export IN_NIX_SHELL=1\n"; out << "nix_saved_PATH=\"$PATH\"\n"; for (auto & i : buildEnvironment.env) { From ab16b3d076e9cd3ecfdcde128f43dd486b072557 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 8 Aug 2019 14:32:02 +0200 Subject: [PATCH 242/634] Fix gc-auto test --- tests/gc-auto.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/gc-auto.sh b/tests/gc-auto.sh index 1e91282d0..e770e665c 100644 --- a/tests/gc-auto.sh +++ b/tests/gc-auto.sh @@ -29,7 +29,7 @@ with import ./config.nix; mkDerivation { EOF ) -nix build -o $TEST_ROOT/result-A -L "($expr)" \ +nix build --impure -o $TEST_ROOT/result-A -L "($expr)" \ --min-free 1000 --max-free 2000 --min-free-check-interval 1 & pid=$! @@ -50,7 +50,7 @@ with import ./config.nix; mkDerivation { EOF ) -nix build -o $TEST_ROOT/result-B -L "($expr2)" \ +nix build --impure -o $TEST_ROOT/result-B -L "($expr2)" \ --min-free 1000 --max-free 2000 --min-free-check-interval 1 wait "$pid" From 662db921e2f5bebeb0fd455aa744708468df8bfa Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 9 Aug 2019 18:51:52 +0200 Subject: [PATCH 243/634] nix dev-shell: Set dontAddDisableDepTrack --- src/nix/shell.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nix/shell.cc b/src/nix/shell.cc index 5f2724961..a3827c297 100644 --- a/src/nix/shell.cc +++ b/src/nix/shell.cc @@ -72,7 +72,7 @@ Path getDerivationEnvironment(ref store, Derivation drv) if (builder != "bash") throw Error("'nix shell' only works on derivations that use 'bash' as their builder"); - drv.args = {"-c", "set -e; export IN_NIX_SHELL=impure; if [[ -n $stdenv ]]; then source $stdenv/setup; fi; set > $out"}; + drv.args = {"-c", "set -e; export IN_NIX_SHELL=impure; export dontAddDisableDepTrack=1; if [[ -n $stdenv ]]; then source $stdenv/setup; fi; set > $out"}; /* Remove derivation checks. */ drv.env.erase("allowedReferences"); From 89468410d56d2b7b86ac9d60d70757f4d8f33718 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 30 Aug 2019 11:22:34 +0200 Subject: [PATCH 244/634] Extract flake dependencies from the 'outputs' arguments That is, instead of inputs = [ "nixpkgs" ]; outputs = inputs: ... inputs.nixpkgs ...; you can write outputs = { nixpkgs }: ... 
inputs.nixpkgs ...; --- src/libexpr/eval.cc | 1 + src/libexpr/eval.hh | 2 +- src/libexpr/flake/flake.cc | 17 +++++++++++++---- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index faa76f1f7..fa79b0d5e 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -304,6 +304,7 @@ EvalState::EvalState(const Strings & _searchPath, ref store) , sOutputHashAlgo(symbols.create("outputHashAlgo")) , sOutputHashMode(symbols.create("outputHashMode")) , sDescription(symbols.create("description")) + , sSelf(symbols.create("self")) , repair(NoRepair) , store(store) , baseEnv(allocEnv(128)) diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index 75e91e6b1..5e976f196 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -80,7 +80,7 @@ public: sFile, sLine, sColumn, sFunctor, sToString, sRight, sWrong, sStructuredAttrs, sBuilder, sArgs, sOutputHash, sOutputHashAlgo, sOutputHashMode, - sDescription; + sDescription, sSelf; Symbol sDerivationNix; /* If set, force copying files to the Nix store even if they diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index e6cef502c..eb40f3b2a 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -232,10 +232,10 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) if (edition) { flake.edition = state.forceInt(*(**edition).value, *(**edition).pos); - if (flake.edition < 201906) - throw Error("flake '%s' has illegal edition %d", flakeRef, flake.edition); - if (flake.edition > 201906) + if (flake.edition > 201909) throw Error("flake '%s' requires unsupported edition %d; please upgrade Nix", flakeRef, flake.edition); + if (flake.edition < 201909) + throw Error("flake '%s' has illegal edition %d", flakeRef, flake.edition); } else throw Error("flake '%s' lacks attribute 'edition'", flakeRef); @@ -272,6 +272,15 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) if (auto outputs = vInfo.attrs->get(sOutputs)) { state.forceFunction(*(**outputs).value, *(**outputs).pos); flake.vOutputs = (**outputs).value; + + if (flake.vOutputs->lambda.fun->matchAttrs) { + for (auto & formal : flake.vOutputs->lambda.fun->formals->formals) { + if (formal.name != state.sSelf) { + flake.inputs.push_back(FlakeRef(formal.name)); + } + } + } + } else throw Error("flake '%s' lacks attribute 'outputs'", flakeRef); @@ -538,7 +547,7 @@ void callFlake(EvalState & state, auto vOutputs = state.allocAttr(v, state.symbols.create("outputs")); mkApp(*vOutputs, *flake.vOutputs, v); - v.attrs->push_back(Attr(state.symbols.create("self"), &v)); + v.attrs->push_back(Attr(state.sSelf, &v)); v.attrs->sort(); From 2341f30ec66ca6b6e795eb9764ec7a0496c1c4f3 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 30 Aug 2019 13:06:23 +0200 Subject: [PATCH 245/634] Clean up the 'outputs' interface --- src/libexpr/flake/flake.cc | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index eb40f3b2a..2a6792af9 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -505,17 +505,14 @@ void callFlake(EvalState & state, const FlakeInputs & inputs, Value & vRes) { - // Construct the resulting attrset '{outputs, ...}'. This attrset - // is passed lazily as an argument to the 'outputs' function. 
+ auto & vInputs = *state.allocValue(); - auto & v = *state.allocValue(); - - state.mkAttrs(v, + state.mkAttrs(vInputs, inputs.flakeInputs.size() + - inputs.nonFlakeInputs.size() + 8); + inputs.nonFlakeInputs.size() + 1); for (auto & dep : inputs.flakeInputs) { - auto vFlake = state.allocAttr(v, dep.second.id); + auto vFlake = state.allocAttr(vInputs, dep.second.id); auto vPrimOp = state.allocValue(); static auto primOp = new PrimOp(prim_callFlake, 1, state.symbols.create("callFlake")); vPrimOp->type = tPrimOp; @@ -528,7 +525,7 @@ void callFlake(EvalState & state, } for (auto & dep : inputs.nonFlakeInputs) { - auto vNonFlake = state.allocAttr(v, dep.first); + auto vNonFlake = state.allocAttr(vInputs, dep.first); auto vPrimOp = state.allocValue(); static auto primOp = new PrimOp(prim_callNonFlake, 1, state.symbols.create("callNonFlake")); vPrimOp->type = tPrimOp; @@ -540,24 +537,27 @@ void callFlake(EvalState & state, mkApp(*vNonFlake, *vPrimOp, *vArg); } - mkString(*state.allocAttr(v, state.sDescription), flake.description); + auto & vSourceInfo = *state.allocValue(); + state.mkAttrs(vSourceInfo, 8); + emitSourceInfoAttrs(state, flake.sourceInfo, vSourceInfo); - emitSourceInfoAttrs(state, flake.sourceInfo, v); + vInputs.attrs->push_back(Attr(state.sSelf, &vRes)); - auto vOutputs = state.allocAttr(v, state.symbols.create("outputs")); - mkApp(*vOutputs, *flake.vOutputs, v); - - v.attrs->push_back(Attr(state.sSelf, &v)); - - v.attrs->sort(); + vInputs.attrs->sort(); /* For convenience, put the outputs directly in the result, so you can refer to an output of an input as 'inputs.foo.bar' rather than 'inputs.foo.outputs.bar'. */ - auto v2 = *state.allocValue(); - state.eval(state.parseExprFromString("res: res.outputs // res", "/"), v2); + auto vCall = *state.allocValue(); + state.eval(state.parseExprFromString( + "outputsFun: inputs: sourceInfo: let outputs = outputsFun inputs; in " + "outputs // sourceInfo // { inherit inputs; inherit outputs; inherit sourceInfo; }", "/"), vCall); - state.callFunction(v2, v, vRes, noPos); + auto vCall2 = *state.allocValue(); + auto vCall3 = *state.allocValue(); + state.callFunction(vCall, *flake.vOutputs, vCall2, noPos); + state.callFunction(vCall2, vInputs, vCall3, noPos); + state.callFunction(vCall3, vSourceInfo, vRes, noPos); } void callFlake(EvalState & state, From d749f5132b7da9676e5dec2b1ef596024a23da95 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 30 Aug 2019 13:06:48 +0200 Subject: [PATCH 246/634] Update flake.{nix,lock} --- flake.lock | 4 ++-- flake.nix | 12 +++++------- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/flake.lock b/flake.lock index 336e3eb86..8072152ca 100644 --- a/flake.lock +++ b/flake.lock @@ -3,9 +3,9 @@ "nixpkgs": { "id": "nixpkgs", "inputs": {}, - "narHash": "sha256-5/HXWs25BLukwG9VaxmdmUf/9o79e32aW/tmhcWEbKk=", + "narHash": "sha256-AndIaZrFFIT+VFhVtQHsS90I5SWfjTDTxzs9Hx9ZxZA=", "nonFlakeInputs": {}, - "uri": "github:edolstra/nixpkgs/62ac6f7f504c8d3998558d9b269d22d26f13f1f0" + "uri": "github:edolstra/nixpkgs/9bd2e2c96ddeec64e9ad37540412263fdb78458d" } }, "nonFlakeInputs": {}, diff --git a/flake.nix b/flake.nix index e7a85d730..9f7d6b2ee 100644 --- a/flake.nix +++ b/flake.nix @@ -3,15 +3,13 @@ description = "The purely functional package manager"; - edition = 201906; + edition = 201909; - inputs = [ "nixpkgs" ]; - - outputs = inputs: rec { + outputs = { self, nixpkgs }: rec { hydraJobs = import ./release.nix { - nix = inputs.self; - nixpkgs = inputs.nixpkgs; + nix = self; + nixpkgs = nixpkgs; }; checks = { 
@@ -29,7 +27,7 @@ defaultPackage = packages.nix; devShell = import ./shell.nix { - nixpkgs = inputs.nixpkgs; + inherit nixpkgs; }; }; } From 0588d72286c97c7b43d8682930e0e43b0a1b8c1a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 30 Aug 2019 13:11:33 +0200 Subject: [PATCH 247/634] Update tests --- tests/flakes.sh | 35 +++++++++++++++-------------------- 1 file changed, 15 insertions(+), 20 deletions(-) diff --git a/tests/flakes.sh b/tests/flakes.sh index 406e5b632..8bb6e90a2 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -31,7 +31,7 @@ cat > $flake1Dir/flake.nix < $flake2Dir/flake.nix < $flake3Dir/flake.nix < $flake3Dir/flake.nix <&1 | grep 'unsupported edition' # Test whether registry caching works. @@ -244,7 +238,7 @@ cat > $flake3Dir/flake.nix < $flake3Dir/flake.nix < Date: Fri, 30 Aug 2019 16:27:51 +0200 Subject: [PATCH 248/634] Turn flake inputs into an attrset Instead of a list, inputs are now an attrset like inputs = { nixpkgs.uri = github:NixOS/nixpkgs; }; If 'uri' is omitted, than the flake is a lookup in the flake registry, e.g. inputs = { nixpkgs = {}; }; but in that case, you can also just omit the input altogether and specify it as an argument to the 'outputs' function, as in outputs = { self, nixpkgs }: ... This also gets rid of 'nonFlakeInputs', which are now just a special kind of input that have a 'flake = false' attribute, e.g. inputs = { someRepo = { uri = github:example/repo; flake = false; }; }; --- src/libexpr/flake/flake.cc | 167 ++++++++++++++------------------ src/libexpr/flake/flake.hh | 20 ++-- src/libexpr/flake/lockfile.cc | 52 +++------- src/libexpr/flake/lockfile.hh | 79 +++++---------- src/libexpr/primops/fetchGit.cc | 1 - src/nix/installables.cc | 11 +-- tests/flakes.sh | 26 ++--- 7 files changed, 136 insertions(+), 220 deletions(-) diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index 2a6792af9..ce5dd55a2 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -248,22 +248,28 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) flake.description = state.forceStringNoCtx(*(**description).value, *(**description).pos); auto sInputs = state.symbols.create("inputs"); + auto sUri = state.symbols.create("uri"); + auto sFlake = state.symbols.create("flake"); - if (auto inputs = vInfo.attrs->get(sInputs)) { - state.forceList(*(**inputs).value, *(**inputs).pos); - for (unsigned int n = 0; n < (**inputs).value->listSize(); ++n) - flake.inputs.push_back(FlakeRef(state.forceStringNoCtx( - *(**inputs).value->listElems()[n], *(**inputs).pos))); - } + if (std::optional inputs = vInfo.attrs->get(sInputs)) { + state.forceAttrs(*(**inputs).value, *(**inputs).pos); - auto sNonFlakeInputs = state.symbols.create("nonFlakeInputs"); + for (Attr inputAttr : *(*(**inputs).value).attrs) { + state.forceAttrs(*inputAttr.value, *inputAttr.pos); - if (std::optional nonFlakeInputs = vInfo.attrs->get(sNonFlakeInputs)) { - state.forceAttrs(*(**nonFlakeInputs).value, *(**nonFlakeInputs).pos); - for (Attr attr : *(*(**nonFlakeInputs).value).attrs) { - std::string myNonFlakeUri = state.forceStringNoCtx(*attr.value, *attr.pos); - FlakeRef nonFlakeRef = FlakeRef(myNonFlakeUri); - flake.nonFlakeInputs.insert_or_assign(attr.name, nonFlakeRef); + FlakeInput input(FlakeRef(inputAttr.name)); + + for (Attr attr : *(inputAttr.value->attrs)) { + if (attr.name == sUri) { + input.ref = state.forceStringNoCtx(*attr.value, *attr.pos); + } else if (attr.name == sFlake) { + input.isFlake = state.forceBool(*attr.value, *attr.pos); + } else + 
throw Error("flake input '%s' has an unsupported attribute '%s', at %s", + inputAttr.name, attr.name, *attr.pos); + } + + flake.inputs.emplace(inputAttr.name, input); } } @@ -275,9 +281,8 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) if (flake.vOutputs->lambda.fun->matchAttrs) { for (auto & formal : flake.vOutputs->lambda.fun->formals->formals) { - if (formal.name != state.sSelf) { - flake.inputs.push_back(FlakeRef(formal.name)); - } + if (formal.name != state.sSelf) + flake.inputs.emplace(formal.name, FlakeInput(FlakeRef(formal.name))); } } @@ -290,7 +295,6 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) attr.name != state.sName && attr.name != state.sDescription && attr.name != sInputs && - attr.name != sNonFlakeInputs && attr.name != sOutputs) throw Error("flake '%s' has an unsupported attribute '%s', at %s", flakeRef, attr.name, *attr.pos); @@ -299,21 +303,19 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) return flake; } -NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef) +static SourceInfo getNonFlake(EvalState & state, const FlakeRef & flakeRef) { auto sourceInfo = fetchFlake(state, flakeRef); debug("got non-flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string()); FlakeRef resolvedRef = sourceInfo.resolvedRef; - NonFlake nonFlake(flakeRef, sourceInfo); - - state.store->assertStorePath(nonFlake.sourceInfo.storePath); + state.store->assertStorePath(sourceInfo.storePath); if (state.allowedPaths) - state.allowedPaths->insert(nonFlake.sourceInfo.storePath); + state.allowedPaths->insert(sourceInfo.storePath); - return nonFlake; + return sourceInfo; } bool allowedToWrite(HandleLockFile handle) @@ -346,46 +348,33 @@ bool allowedToUseRegistries(HandleLockFile handle, bool isTopRef) Note that this is lazy: we only recursively fetch inputs that are not in the lockfile yet. 
*/ -static std::pair updateLocks( +static std::pair updateLocks( EvalState & state, const Flake & flake, HandleLockFile handleLockFile, - const FlakeInputs & oldEntry, + const LockedInputs & oldEntry, bool topRef) { - FlakeInput newEntry( - flake.id, + LockedInput newEntry( flake.sourceInfo.resolvedRef, flake.sourceInfo.narHash); - for (auto & input : flake.nonFlakeInputs) { - auto & id = input.first; - auto & ref = input.second; - auto i = oldEntry.nonFlakeInputs.find(id); - if (i != oldEntry.nonFlakeInputs.end()) { - newEntry.nonFlakeInputs.insert_or_assign(i->first, i->second); + for (auto & [id, input] : flake.inputs) { + auto i = oldEntry.inputs.find(id); + if (i != oldEntry.inputs.end()) { + newEntry.inputs.insert_or_assign(id, i->second); } else { if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries) - throw Error("cannot update non-flake dependency '%s' in pure mode", id); - auto nonFlake = getNonFlake(state, maybeLookupFlake(state, ref, allowedToUseRegistries(handleLockFile, false))); - newEntry.nonFlakeInputs.insert_or_assign(id, - NonFlakeInput( - nonFlake.sourceInfo.resolvedRef, - nonFlake.sourceInfo.narHash)); - } - } - - for (auto & inputRef : flake.inputs) { - auto i = oldEntry.flakeInputs.find(inputRef); - if (i != oldEntry.flakeInputs.end()) { - newEntry.flakeInputs.insert_or_assign(inputRef, i->second); - } else { - if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries) - throw Error("cannot update flake dependency '%s' in pure mode", inputRef); - newEntry.flakeInputs.insert_or_assign(inputRef, - updateLocks(state, - getFlake(state, maybeLookupFlake(state, inputRef, allowedToUseRegistries(handleLockFile, false))), - handleLockFile, {}, false).second); + throw Error("cannot update flake input '%s' in pure mode", id); + if (input.isFlake) + newEntry.inputs.insert_or_assign(id, + updateLocks(state, + getFlake(state, maybeLookupFlake(state, input.ref, allowedToUseRegistries(handleLockFile, false))), + handleLockFile, {}, false).second); + else { + auto sourceInfo = getNonFlake(state, maybeLookupFlake(state, input.ref, allowedToUseRegistries(handleLockFile, false))); + newEntry.inputs.insert_or_assign(id, LockedInput(sourceInfo.resolvedRef, sourceInfo.narHash)); + } } } @@ -462,81 +451,69 @@ static void emitSourceInfoAttrs(EvalState & state, const SourceInfo & sourceInfo std::put_time(std::gmtime(&*sourceInfo.lastModified), "%Y%m%d%H%M%S"))); } +struct LazyInput +{ + bool isFlake; + LockedInput lockedInput; +}; + /* Helper primop to make callFlake (below) fetch/call its inputs lazily. Note that this primop cannot be called by user code since it doesn't appear in 'builtins'. 
*/ static void prim_callFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) { - auto lazyFlake = (FlakeInput *) args[0]->attrs; + auto lazyInput = (LazyInput *) args[0]->attrs; - assert(lazyFlake->ref.isImmutable()); + assert(lazyInput->lockedInput.ref.isImmutable()); - auto flake = getFlake(state, lazyFlake->ref); + if (lazyInput->isFlake) { + auto flake = getFlake(state, lazyInput->lockedInput.ref); - if (flake.sourceInfo.narHash != lazyFlake->narHash) - throw Error("the content hash of flake '%s' doesn't match the hash recorded in the referring lockfile", flake.sourceInfo.resolvedRef); + if (flake.sourceInfo.narHash != lazyInput->lockedInput.narHash) + throw Error("the content hash of flake '%s' doesn't match the hash recorded in the referring lockfile", flake.sourceInfo.resolvedRef); - callFlake(state, flake, *lazyFlake, v); -} + callFlake(state, flake, lazyInput->lockedInput, v); + } else { + auto sourceInfo = getNonFlake(state, lazyInput->lockedInput.ref); -static void prim_callNonFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) -{ - auto lazyNonFlake = (NonFlakeInput *) args[0]->attrs; + if (sourceInfo.narHash != lazyInput->lockedInput.narHash) + throw Error("the content hash of repository '%s' doesn't match the hash recorded in the referring lockfile", sourceInfo.resolvedRef); - assert(lazyNonFlake->ref.isImmutable()); + state.mkAttrs(v, 8); - auto nonFlake = getNonFlake(state, lazyNonFlake->ref); + assert(state.store->isValidPath(sourceInfo.storePath)); - if (nonFlake.sourceInfo.narHash != lazyNonFlake->narHash) - throw Error("the content hash of repository '%s' doesn't match the hash recorded in the referring lockfile", nonFlake.sourceInfo.resolvedRef); + mkString(*state.allocAttr(v, state.sOutPath), + sourceInfo.storePath, {sourceInfo.storePath}); - state.mkAttrs(v, 8); - - assert(state.store->isValidPath(nonFlake.sourceInfo.storePath)); - - mkString(*state.allocAttr(v, state.sOutPath), - nonFlake.sourceInfo.storePath, {nonFlake.sourceInfo.storePath}); - - emitSourceInfoAttrs(state, nonFlake.sourceInfo, v); + emitSourceInfoAttrs(state, sourceInfo, v); + } } void callFlake(EvalState & state, const Flake & flake, - const FlakeInputs & inputs, + const LockedInputs & lockedInputs, Value & vRes) { auto & vInputs = *state.allocValue(); - state.mkAttrs(vInputs, - inputs.flakeInputs.size() + - inputs.nonFlakeInputs.size() + 1); + state.mkAttrs(vInputs, flake.inputs.size() + 1); - for (auto & dep : inputs.flakeInputs) { - auto vFlake = state.allocAttr(vInputs, dep.second.id); + for (auto & [inputId, input] : flake.inputs) { + auto vFlake = state.allocAttr(vInputs, inputId); auto vPrimOp = state.allocValue(); static auto primOp = new PrimOp(prim_callFlake, 1, state.symbols.create("callFlake")); vPrimOp->type = tPrimOp; vPrimOp->primOp = primOp; auto vArg = state.allocValue(); vArg->type = tNull; + auto lockedInput = lockedInputs.inputs.find(inputId); + assert(lockedInput != lockedInputs.inputs.end()); // FIXME: leak - vArg->attrs = (Bindings *) new FlakeInput(dep.second); // evil! 
also inefficient + vArg->attrs = (Bindings *) new LazyInput{input.isFlake, lockedInput->second}; mkApp(*vFlake, *vPrimOp, *vArg); } - for (auto & dep : inputs.nonFlakeInputs) { - auto vNonFlake = state.allocAttr(vInputs, dep.first); - auto vPrimOp = state.allocValue(); - static auto primOp = new PrimOp(prim_callNonFlake, 1, state.symbols.create("callNonFlake")); - vPrimOp->type = tPrimOp; - vPrimOp->primOp = primOp; - auto vArg = state.allocValue(); - vArg->type = tNull; - // FIXME: leak - vArg->attrs = (Bindings *) new NonFlakeInput(dep.second); // evil! also inefficient - mkApp(*vNonFlake, *vPrimOp, *vArg); - } - auto & vSourceInfo = *state.allocValue(); state.mkAttrs(vSourceInfo, 8); emitSourceInfoAttrs(state, flake.sourceInfo, vSourceInfo); diff --git a/src/libexpr/flake/flake.hh b/src/libexpr/flake/flake.hh index 01fb421bd..84bce9b5a 100644 --- a/src/libexpr/flake/flake.hh +++ b/src/libexpr/flake/flake.hh @@ -58,14 +58,20 @@ struct SourceInfo SourceInfo(const FlakeRef & resolvRef) : resolvedRef(resolvRef) {}; }; +struct FlakeInput +{ + FlakeRef ref; + bool isFlake = true; + FlakeInput(const FlakeRef & ref) : ref(ref) {}; +}; + struct Flake { FlakeId id; FlakeRef originalRef; std::string description; SourceInfo sourceInfo; - std::vector inputs; - std::map nonFlakeInputs; + std::map inputs; Value * vOutputs; // FIXME: gc unsigned int edition; @@ -73,14 +79,6 @@ struct Flake : originalRef(origRef), sourceInfo(sourceInfo) {}; }; -struct NonFlake -{ - FlakeRef originalRef; - SourceInfo sourceInfo; - NonFlake(const FlakeRef & origRef, const SourceInfo & sourceInfo) - : originalRef(origRef), sourceInfo(sourceInfo) {}; -}; - Flake getFlake(EvalState &, const FlakeRef &); /* If 'allowLookup' is true, then resolve 'flakeRef' using the @@ -108,7 +106,7 @@ ResolvedFlake resolveFlake(EvalState &, const FlakeRef &, HandleLockFile); void callFlake(EvalState & state, const Flake & flake, - const FlakeInputs & inputs, + const LockedInputs & inputs, Value & v); void callFlake(EvalState & state, diff --git a/src/libexpr/flake/lockfile.cc b/src/libexpr/flake/lockfile.cc index 15f2e2e8e..f32d752f9 100644 --- a/src/libexpr/flake/lockfile.cc +++ b/src/libexpr/flake/lockfile.cc @@ -3,83 +3,57 @@ namespace nix::flake { -AbstractInput::AbstractInput(const nlohmann::json & json) - : ref(json["uri"]) +LockedInput::LockedInput(const nlohmann::json & json) + : LockedInputs(json) + , ref(json["uri"]) , narHash(Hash((std::string) json["narHash"])) { if (!ref.isImmutable()) throw Error("lockfile contains mutable flakeref '%s'", ref); } -nlohmann::json AbstractInput::toJson() const +nlohmann::json LockedInput::toJson() const { - nlohmann::json json; + auto json = LockedInputs::toJson(); json["uri"] = ref.to_string(); json["narHash"] = narHash.to_string(SRI); return json; } -Path AbstractInput::computeStorePath(Store & store) const +Path LockedInput::computeStorePath(Store & store) const { return store.makeFixedOutputPath(true, narHash, "source"); } -FlakeInput::FlakeInput(const nlohmann::json & json) - : FlakeInputs(json) - , AbstractInput(json) - , id(json["id"]) +LockedInputs::LockedInputs(const nlohmann::json & json) { -} - -nlohmann::json FlakeInput::toJson() const -{ - auto json = FlakeInputs::toJson(); - json.update(AbstractInput::toJson()); - json["id"] = id; - return json; -} - -FlakeInputs::FlakeInputs(const nlohmann::json & json) -{ - for (auto & i : json["nonFlakeInputs"].items()) - nonFlakeInputs.insert_or_assign(i.key(), NonFlakeInput(i.value())); - for (auto & i : json["inputs"].items()) - 
flakeInputs.insert_or_assign(i.key(), FlakeInput(i.value())); + inputs.insert_or_assign(i.key(), LockedInput(i.value())); } -nlohmann::json FlakeInputs::toJson() const +nlohmann::json LockedInputs::toJson() const { nlohmann::json json; { auto j = nlohmann::json::object(); - for (auto & i : nonFlakeInputs) + for (auto & i : inputs) j[i.first] = i.second.toJson(); - json["nonFlakeInputs"] = std::move(j); - } - { - auto j = nlohmann::json::object(); - for (auto & i : flakeInputs) - j[i.first.to_string()] = i.second.toJson(); json["inputs"] = std::move(j); } return json; } -bool FlakeInputs::isDirty() const +bool LockedInputs::isDirty() const { - for (auto & i : flakeInputs) + for (auto & i : inputs) if (i.second.ref.isDirty() || i.second.isDirty()) return true; - for (auto & i : nonFlakeInputs) - if (i.second.ref.isDirty()) return true; - return false; } nlohmann::json LockFile::toJson() const { - auto json = FlakeInputs::toJson(); + auto json = LockedInputs::toJson(); json["version"] = 2; return json; } diff --git a/src/libexpr/flake/lockfile.hh b/src/libexpr/flake/lockfile.hh index 7077db3cd..084eabc1a 100644 --- a/src/libexpr/flake/lockfile.hh +++ b/src/libexpr/flake/lockfile.hh @@ -10,47 +10,15 @@ class Store; namespace nix::flake { -/* Common lock file information about a flake input, namely the - immutable ref and the NAR hash. */ -struct AbstractInput -{ - FlakeRef ref; - Hash narHash; - - AbstractInput(const FlakeRef & flakeRef, const Hash & narHash) - : ref(flakeRef), narHash(narHash) - { - assert(ref.isImmutable()); - }; - - AbstractInput(const nlohmann::json & json); - - nlohmann::json toJson() const; - - Path computeStorePath(Store & store) const; -}; - -/* Lock file information about a non-flake input. */ -struct NonFlakeInput : AbstractInput -{ - using AbstractInput::AbstractInput; - - bool operator ==(const NonFlakeInput & other) const - { - return ref == other.ref && narHash == other.narHash; - } -}; - -struct FlakeInput; +struct LockedInput; /* Lock file information about the dependencies of a flake. */ -struct FlakeInputs +struct LockedInputs { - std::map flakeInputs; - std::map nonFlakeInputs; + std::map inputs; - FlakeInputs() {}; - FlakeInputs(const nlohmann::json & json); + LockedInputs() {}; + LockedInputs(const nlohmann::json & json); nlohmann::json toJson() const; @@ -60,47 +28,48 @@ struct FlakeInputs }; /* Lock file information about a flake input. */ -struct FlakeInput : FlakeInputs, AbstractInput +struct LockedInput : LockedInputs { - FlakeId id; + FlakeRef ref; + Hash narHash; - FlakeInput(const FlakeId & id, const FlakeRef & flakeRef, const Hash & narHash) - : AbstractInput(flakeRef, narHash), id(id) {}; + LockedInput(const FlakeRef & ref, const Hash & narHash) + : ref(ref), narHash(narHash) + { + assert(ref.isImmutable()); + }; - FlakeInput(const nlohmann::json & json); + LockedInput(const nlohmann::json & json); - bool operator ==(const FlakeInput & other) const + bool operator ==(const LockedInput & other) const { return - id == other.id - && ref == other.ref + ref == other.ref && narHash == other.narHash - && flakeInputs == other.flakeInputs - && nonFlakeInputs == other.nonFlakeInputs; + && inputs == other.inputs; } nlohmann::json toJson() const; + + Path computeStorePath(Store & store) const; }; /* An entire lock file. Note that this cannot be a FlakeInput for the top-level flake, because then the lock file would need to contain the hash of the top-level flake, but committing the lock file would invalidate that hash. 
*/ -struct LockFile : FlakeInputs +struct LockFile : LockedInputs { bool operator ==(const LockFile & other) const { - return - flakeInputs == other.flakeInputs - && nonFlakeInputs == other.nonFlakeInputs; + return inputs == other.inputs; } LockFile() {} - LockFile(const nlohmann::json & json) : FlakeInputs(json) {} - LockFile(FlakeInput && dep) + LockFile(const nlohmann::json & json) : LockedInputs(json) {} + LockFile(LockedInput && dep) { - flakeInputs = std::move(dep.flakeInputs); - nonFlakeInputs = std::move(dep.nonFlakeInputs); + inputs = std::move(dep.inputs); } nlohmann::json toJson() const; diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index 940cf3022..931eac95f 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -31,7 +31,6 @@ GitInfo exportGit(ref store, std::string uri, // or revision is given, then allow the use of an unclean working // tree. if (!ref && !rev && isLocal) { - bool clean = true; try { diff --git a/src/nix/installables.cc b/src/nix/installables.cc index aa5ef5184..dbbf58861 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -217,25 +217,20 @@ void makeFlakeClosureGCRoot(Store & store, assert(store.isValidPath(resFlake.flake.sourceInfo.storePath)); closure.insert(resFlake.flake.sourceInfo.storePath); - std::queue> queue; + std::queue> queue; queue.push(resFlake.lockFile); while (!queue.empty()) { - const flake::FlakeInputs & flake = queue.front(); + const flake::LockedInputs & flake = queue.front(); queue.pop(); /* Note: due to lazy fetching, these paths might not exist yet. */ - for (auto & dep : flake.flakeInputs) { + for (auto & dep : flake.inputs) { auto path = dep.second.computeStorePath(store); if (store.isValidPath(path)) closure.insert(path); queue.push(dep.second); } - for (auto & dep : flake.nonFlakeInputs) { - auto path = dep.second.computeStorePath(store); - if (store.isValidPath(path)) - closure.insert(path); - } } if (closure.empty()) return; diff --git a/tests/flakes.sh b/tests/flakes.sh index 8bb6e90a2..7c16304ec 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -240,10 +240,13 @@ cat > $flake3Dir/flake.nix < $flake3Dir/flake.nix < \$out + cat \${nonFlake}/README.md > \$out ''; }; }; From 80c36d4562af71a90c67b3adb886a1003834890e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 30 Aug 2019 16:38:27 +0200 Subject: [PATCH 249/634] Remove 'name' attribute from flakes This is no longer needed since flakes are given an identity in the 'inputs' attribute. 
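For illustration, a minimal sketch (not taken from this patch) of the resulting convention, assuming the edition-201909 schema used by the flakes elsewhere in this series; "dep" and the GitHub owner/repo are hypothetical placeholders. The identity of a dependency is now simply the attribute name the consumer chooses under 'inputs', so the dependency's own flake.nix no longer declares a name of its own.

    # Consumer's flake.nix (sketch)
    {
      description = "Example consumer";
      edition = 201909;

      inputs = {
        dep = { uri = "github:example/dep"; };
      };

      # The set passed to 'outputs' contains 'self' plus one attribute per input id.
      outputs = inputs: {
        packages.hello = inputs.dep.packages.hello;
      };
    }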
--- flake.lock | 7 ++----- flake.nix | 2 -- src/libexpr/flake/flake.cc | 6 ------ src/libexpr/flake/flake.hh | 1 - src/nix/flake.cc | 11 ----------- tests/flakes.sh | 20 ++++---------------- 6 files changed, 6 insertions(+), 41 deletions(-) diff --git a/flake.lock b/flake.lock index 8072152ca..fa5649c03 100644 --- a/flake.lock +++ b/flake.lock @@ -1,13 +1,10 @@ { "inputs": { "nixpkgs": { - "id": "nixpkgs", "inputs": {}, - "narHash": "sha256-AndIaZrFFIT+VFhVtQHsS90I5SWfjTDTxzs9Hx9ZxZA=", - "nonFlakeInputs": {}, - "uri": "github:edolstra/nixpkgs/9bd2e2c96ddeec64e9ad37540412263fdb78458d" + "narHash": "sha256-TrLhI3xPkTTznE9gcMHhkHirGNN7N02zM4DxJ/U3WRs=", + "uri": "github:edolstra/nixpkgs/24bf27fc215e8300877dfa1c426b9966bbfbd150" } }, - "nonFlakeInputs": {}, "version": 2 } diff --git a/flake.nix b/flake.nix index 9f7d6b2ee..06a5515c5 100644 --- a/flake.nix +++ b/flake.nix @@ -1,6 +1,4 @@ { - name = "nix"; - description = "The purely functional package manager"; edition = 201909; diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index ce5dd55a2..ba337cf97 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -239,11 +239,6 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) } else throw Error("flake '%s' lacks attribute 'edition'", flakeRef); - if (auto name = vInfo.attrs->get(state.sName)) - flake.id = state.forceStringNoCtx(*(**name).value, *(**name).pos); - else - throw Error("flake '%s' lacks attribute 'name'", flakeRef); - if (auto description = vInfo.attrs->get(state.sDescription)) flake.description = state.forceStringNoCtx(*(**description).value, *(**description).pos); @@ -292,7 +287,6 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) for (auto & attr : *vInfo.attrs) { if (attr.name != sEdition && attr.name != sEpoch && - attr.name != state.sName && attr.name != state.sDescription && attr.name != sInputs && attr.name != sOutputs) diff --git a/src/libexpr/flake/flake.hh b/src/libexpr/flake/flake.hh index 84bce9b5a..8f05e9799 100644 --- a/src/libexpr/flake/flake.hh +++ b/src/libexpr/flake/flake.hh @@ -67,7 +67,6 @@ struct FlakeInput struct Flake { - FlakeId id; FlakeRef originalRef; std::string description; SourceInfo sourceInfo; diff --git a/src/nix/flake.cc b/src/nix/flake.cc index aab29b626..4129ef323 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -103,7 +103,6 @@ static void sourceInfoToJson(const SourceInfo & sourceInfo, nlohmann::json & j) static void printFlakeInfo(const Flake & flake) { - std::cout << fmt("ID: %s\n", flake.id); std::cout << fmt("Description: %s\n", flake.description); std::cout << fmt("Edition: %s\n", flake.edition); printSourceInfo(flake.sourceInfo); @@ -112,7 +111,6 @@ static void printFlakeInfo(const Flake & flake) static nlohmann::json flakeToJson(const Flake & flake) { nlohmann::json j; - j["id"] = flake.id; j["description"] = flake.description; j["edition"] = flake.edition; sourceInfoToJson(flake.sourceInfo, j); @@ -120,12 +118,6 @@ static nlohmann::json flakeToJson(const Flake & flake) } #if 0 -static void printNonFlakeInfo(const NonFlake & nonFlake) -{ - std::cout << fmt("ID: %s\n", nonFlake.alias); - printSourceInfo(nonFlake.sourceInfo); -} - // FIXME: merge info CmdFlakeInfo? 
struct CmdFlakeDeps : FlakeCommand { @@ -148,9 +140,6 @@ struct CmdFlakeDeps : FlakeCommand auto resFlake = std::move(todo.front()); todo.pop(); - for (auto & nonFlake : resFlake.nonFlakeDeps) - printNonFlakeInfo(nonFlake); - for (auto & info : resFlake.flakeDeps) { printFlakeInfo(info.second.flake); todo.push(info.second); diff --git a/tests/flakes.sh b/tests/flakes.sh index 7c16304ec..8594a4c55 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -29,8 +29,6 @@ done cat > $flake1Dir/flake.nix < $flake2Dir/flake.nix < $flake3Dir/flake.nix < $flake3Dir/flake.nix < $flake3Dir/flake.nix < $flake3Dir/flake.nix < Date: Fri, 30 Aug 2019 17:27:51 +0200 Subject: [PATCH 250/634] Fix sourceInfo --- src/libexpr/flake/flake.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index ba337cf97..9e25fc116 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -511,6 +511,7 @@ void callFlake(EvalState & state, auto & vSourceInfo = *state.allocValue(); state.mkAttrs(vSourceInfo, 8); emitSourceInfoAttrs(state, flake.sourceInfo, vSourceInfo); + vSourceInfo.attrs->sort(); vInputs.attrs->push_back(Attr(state.sSelf, &vRes)); From 5ec2a1ed82d485429aaf6fbad55fd6c1320b2d8c Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 2 Sep 2019 15:59:19 +0200 Subject: [PATCH 251/634] nix dev-shell --profile: Support relative path --- src/nix/command.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/nix/command.cc b/src/nix/command.cc index 8191cb831..9cca443dc 100644 --- a/src/nix/command.cc +++ b/src/nix/command.cc @@ -96,10 +96,11 @@ void MixProfile::updateProfile(const Path & storePath) if (!profile) return; auto store = getStore().dynamic_pointer_cast(); if (!store) throw Error("'--profile' is not supported for this Nix store"); - switchLink(*profile, + auto profile2 = absPath(*profile); + switchLink(profile2, createGeneration( ref(store), - *profile, storePath)); + profile2, storePath)); } void MixProfile::updateProfile(const Buildables & buildables) From 61fdb16aacf9ff18c96b72a37e1b46eb14586eb4 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 2 Sep 2019 17:33:07 +0200 Subject: [PATCH 252/634] Improve error message when a directory is not a flake So you now get $ nix build error: path '.' is not a flake (because it does not reference a Git repository) rather than $ nix build error: unsupported argument '.' --- src/libexpr/flake/flakeref.cc | 6 ++-- src/libexpr/flake/flakeref.hh | 1 + src/nix/installables.cc | 52 +++++++++++++++++++++++------------ 3 files changed, 39 insertions(+), 20 deletions(-) diff --git a/src/libexpr/flake/flakeref.cc b/src/libexpr/flake/flakeref.cc index 7631cd53a..253442566 100644 --- a/src/libexpr/flake/flakeref.cc +++ b/src/libexpr/flake/flakeref.cc @@ -145,10 +145,10 @@ FlakeRef::FlakeRef(const std::string & uri_, bool allowRelative) d.path = absPath(uri); try { if (!S_ISDIR(lstat(d.path).st_mode)) - throw BadFlakeRef("path '%s' is not a flake (sub)directory"); + throw MissingFlake("path '%s' is not a flake (sub)directory", d.path); } catch (SysError & e) { if (e.errNo == ENOENT || e.errNo == EISDIR) - throw BadFlakeRef("flake '%s' does not exist"); + throw MissingFlake("flake '%s' does not exist", d.path); throw; } while (true) { @@ -156,7 +156,7 @@ FlakeRef::FlakeRef(const std::string & uri_, bool allowRelative) subdir = baseNameOf(d.path) + (subdir.empty() ? 
"" : "/" + subdir); d.path = dirOf(d.path); if (d.path == "/") - throw BadFlakeRef("path '%s' does not reference a Git repository", uri); + throw MissingFlake("path '%s' is not a flake (because it does not reference a Git repository)", uri); } } else d.path = canonPath(uri); diff --git a/src/libexpr/flake/flakeref.hh b/src/libexpr/flake/flakeref.hh index 082dd8c26..9ddc227bb 100644 --- a/src/libexpr/flake/flakeref.hh +++ b/src/libexpr/flake/flakeref.hh @@ -187,6 +187,7 @@ struct FlakeRef std::ostream & operator << (std::ostream & str, const FlakeRef & flakeRef); MakeError(BadFlakeRef, Error); +MakeError(MissingFlake, BadFlakeRef); std::optional parseFlakeRef( const std::string & uri, bool allowRelative = false); diff --git a/src/nix/installables.cc b/src/nix/installables.cc index dbbf58861..a4726a59e 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -417,25 +417,43 @@ std::vector> SourceExprCommand::parseInstallables( Strings{"legacyPackages." + std::string(s, 8)})); } - else if (auto flakeRef = parseFlakeRef(s, true)) - result.push_back(std::make_shared(*this, std::move(*flakeRef), - getDefaultFlakeAttrPaths())); + else { - else if ((colon = s.rfind(':')) != std::string::npos) { - auto flakeRef = std::string(s, 0, colon); - auto attrPath = std::string(s, colon + 1); - result.push_back(std::make_shared( - *this, - FlakeRef(flakeRef, true), - attrPath, - getDefaultFlakeAttrPathPrefixes())); + std::exception_ptr flakeEx; + + try { + auto flakeRef = FlakeRef(s, true); + result.push_back(std::make_shared( + *this, std::move(flakeRef), getDefaultFlakeAttrPaths())); + continue; + } catch (MissingFlake &) { + /* 's' could be parsed as a flakeref, but it + references a local path that is not a flake. So + take note of that. */ + flakeEx = std::current_exception(); + } catch (BadFlakeRef &) { + } + + if ((colon = s.rfind(':')) != std::string::npos) { + auto flakeRef = std::string(s, 0, colon); + auto attrPath = std::string(s, colon + 1); + result.push_back(std::make_shared( + *this, + FlakeRef(flakeRef, true), + attrPath, + getDefaultFlakeAttrPathPrefixes())); + } + + else if (s.find('/') != std::string::npos && (storePath = follow(s))) + result.push_back(std::make_shared(*storePath)); + + else { + if (flakeEx) + std::rethrow_exception(flakeEx); + else + throw Error("unsupported argument '%s'", s); + } } - - else if (s.find('/') != std::string::npos && (storePath = follow(s))) - result.push_back(std::make_shared(*storePath)); - - else - throw Error("unsupported argument '%s'", s); } } From c693f80b814c244dcdae7a2e87fb9e444d9d1ca5 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 2 Sep 2019 17:43:27 +0200 Subject: [PATCH 253/634] Shut up some clang warnings --- src/libutil/args.hh | 2 ++ src/nix/command.hh | 2 ++ 2 files changed, 4 insertions(+) diff --git a/src/libutil/args.hh b/src/libutil/args.hh index a083c4ce8..b960a55a8 100644 --- a/src/libutil/args.hh +++ b/src/libutil/args.hh @@ -199,6 +199,8 @@ private: public: + virtual ~Command() { } + std::string name() { return _name; } virtual void prepare() { }; diff --git a/src/nix/command.hh b/src/nix/command.hh index 00c202f20..92f606bbe 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -49,6 +49,8 @@ struct App struct Installable { + virtual ~Installable() { } + virtual std::string what() = 0; virtual Buildables toBuildables() From aeb695c0074b52772057b36f442a054f8d1a856d Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 2 Sep 2019 17:50:44 +0200 Subject: [PATCH 254/634] Remove obsolete OpenSSL locking 
code OpenSSL 1.1.1 no longer needs this (https://github.com/openssl/openssl/commit/2e52e7df518d80188c865ea3f7bb3526d14b0c08). This shuts up a clang warning about opensslLockCallback being unused. --- src/libmain/shared.cc | 20 -------------------- src/libutil/hash.cc | 4 ++++ 2 files changed, 4 insertions(+), 20 deletions(-) diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index 0afddfb78..910549583 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -15,8 +15,6 @@ #include #include -#include - namespace nix { @@ -80,20 +78,6 @@ string getArg(const string & opt, } -/* OpenSSL is not thread-safe by default - it will randomly crash - unless the user supplies a mutex locking function. So let's do - that. */ -static std::vector opensslLocks; - -static void opensslLockCallback(int mode, int type, const char * file, int line) -{ - if (mode & CRYPTO_LOCK) - opensslLocks[type].lock(); - else - opensslLocks[type].unlock(); -} - - static void sigHandler(int signo) { } @@ -105,10 +89,6 @@ void initNix() std::cerr.rdbuf()->pubsetbuf(buf, sizeof(buf)); #endif - /* Initialise OpenSSL locking. */ - opensslLocks = std::vector(CRYPTO_num_locks()); - CRYPTO_set_locking_callback(opensslLockCallback); - loadConfFile(); startSignalHandlerThread(); diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc index 1c14ebb18..362c537fe 100644 --- a/src/libutil/hash.cc +++ b/src/libutil/hash.cc @@ -13,6 +13,10 @@ #include #include +#if OPENSSL_VERSION_NUMBER < 0x10101000L +#error "Unsupported version of OpenSSL, you need at least 1.1.1" +#endif + namespace nix { From 2dbd69dbf4538d5b7947d192979ff4feab322c2e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 2 Sep 2019 23:04:27 +0200 Subject: [PATCH 255/634] nix repl: Run in impure mode --- src/nix/repl.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/nix/repl.cc b/src/nix/repl.cc index 3a70a23c7..0fa1594cc 100644 --- a/src/nix/repl.cc +++ b/src/nix/repl.cc @@ -778,6 +778,7 @@ struct CmdRepl : StoreCommand, MixEvalArgs void run(ref store) override { + evalSettings.pureEval = false; auto repl = std::make_unique(searchPath, openStore()); repl->autoArgs = getAutoArgs(repl->state); repl->mainLoop(files); From 4caeefaf004c1a4fdd67485f0328a6741a9640fb Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 4 Sep 2019 14:06:52 +0200 Subject: [PATCH 256/634] Revert "Remove obsolete OpenSSL locking code" This reverts commit aeb695c0074b52772057b36f442a054f8d1a856d. --- src/libmain/shared.cc | 20 ++++++++++++++++++++ src/libutil/hash.cc | 4 ---- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index 910549583..0afddfb78 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -15,6 +15,8 @@ #include #include +#include + namespace nix { @@ -78,6 +80,20 @@ string getArg(const string & opt, } +/* OpenSSL is not thread-safe by default - it will randomly crash + unless the user supplies a mutex locking function. So let's do + that. */ +static std::vector opensslLocks; + +static void opensslLockCallback(int mode, int type, const char * file, int line) +{ + if (mode & CRYPTO_LOCK) + opensslLocks[type].lock(); + else + opensslLocks[type].unlock(); +} + + static void sigHandler(int signo) { } @@ -89,6 +105,10 @@ void initNix() std::cerr.rdbuf()->pubsetbuf(buf, sizeof(buf)); #endif + /* Initialise OpenSSL locking. 
*/ + opensslLocks = std::vector(CRYPTO_num_locks()); + CRYPTO_set_locking_callback(opensslLockCallback); + loadConfFile(); startSignalHandlerThread(); diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc index 362c537fe..1c14ebb18 100644 --- a/src/libutil/hash.cc +++ b/src/libutil/hash.cc @@ -13,10 +13,6 @@ #include #include -#if OPENSSL_VERSION_NUMBER < 0x10101000L -#error "Unsupported version of OpenSSL, you need at least 1.1.1" -#endif - namespace nix { From 6f88fed8195c43bc46e3f9e62273599f6cc205fa Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 4 Sep 2019 14:14:03 +0200 Subject: [PATCH 257/634] Disable OpenSSL lock callback on OpenSSL >= 1.1.1 --- src/libmain/shared.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index 0afddfb78..d3dbfbc44 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -80,6 +80,7 @@ string getArg(const string & opt, } +#if OPENSSL_VERSION_NUMBER < 0x10101000L /* OpenSSL is not thread-safe by default - it will randomly crash unless the user supplies a mutex locking function. So let's do that. */ @@ -92,6 +93,7 @@ static void opensslLockCallback(int mode, int type, const char * file, int line) else opensslLocks[type].unlock(); } +#endif static void sigHandler(int signo) { } @@ -105,9 +107,11 @@ void initNix() std::cerr.rdbuf()->pubsetbuf(buf, sizeof(buf)); #endif +#if OPENSSL_VERSION_NUMBER < 0x10101000L /* Initialise OpenSSL locking. */ opensslLocks = std::vector(CRYPTO_num_locks()); CRYPTO_set_locking_callback(opensslLockCallback); +#endif loadConfFile(); From 2fa7f2a56a5c2fe11c1a0daceee5cf0584b69be9 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 5 Sep 2019 17:15:09 +0200 Subject: [PATCH 258/634] Use git+ prefix in flake URI schemes Fixes #3045. --- src/libexpr/flake/flakeref.cc | 40 ++++++++++++++++++++------------- src/libexpr/flake/flakeref.hh | 20 ++++++++--------- src/libexpr/primops/fetchGit.cc | 2 ++ 3 files changed, 36 insertions(+), 26 deletions(-) diff --git a/src/libexpr/flake/flakeref.cc b/src/libexpr/flake/flakeref.cc index 253442566..4ce326c0b 100644 --- a/src/libexpr/flake/flakeref.cc +++ b/src/libexpr/flake/flakeref.cc @@ -27,7 +27,7 @@ const static std::string ownerRegex = "[a-zA-Z][a-zA-Z0-9_-]*"; const static std::string repoRegex = "[a-zA-Z][a-zA-Z0-9_-]*"; // URI stuff. -const static std::string schemeRegex = "(?:http|https|ssh|git|file)"; +const static std::string schemeRegex = "[a-z+]+"; const static std::string authorityRegex = "[a-zA-Z0-9._~-]*"; const static std::string segmentRegex = "[a-zA-Z0-9._~-]+"; const static std::string pathRegex = "/?" 
+ segmentRegex + "(?:/" + segmentRegex + ")*"; @@ -120,21 +120,29 @@ FlakeRef::FlakeRef(const std::string & uri_, bool allowRelative) data = d; } - else if (std::regex_match(uri.c_str(), match, uriRegex) - && (match[2] == "file" || hasSuffix(match[4], ".git"))) - { - IsGit d; - d.uri = match[1]; - for (auto & param : params) { - if (handleGitParams(param.first, param.second)) - ; - else - // FIXME: should probably pass through unknown parameters - throw BadFlakeRef("invalid Git flakeref parameter '%s', in '%s'", param.first, uri); - } - if (rev && !ref) - throw BadFlakeRef("flake URI '%s' lacks a Git ref", uri); - data = d; + else if (std::regex_match(uri.c_str(), match, uriRegex)) { + auto & scheme = match[2]; + if (scheme == "git" || + scheme == "git+http" || + scheme == "git+https" || + scheme == "git+ssh" || + scheme == "git+file" || + scheme == "file") + { + IsGit d; + d.uri = match[1]; + for (auto & param : params) { + if (handleGitParams(param.first, param.second)) + ; + else + // FIXME: should probably pass through unknown parameters + throw BadFlakeRef("invalid Git flakeref parameter '%s', in '%s'", param.first, uri); + } + if (rev && !ref) + throw BadFlakeRef("flake URI '%s' lacks a Git ref", uri); + data = d; + } else + throw BadFlakeRef("unsupported URI scheme '%s' in flake reference '%s'", scheme, uri); } else if ((hasPrefix(uri, "/") || (allowRelative && (hasPrefix(uri, "./") || hasPrefix(uri, "../") || uri == "."))) diff --git a/src/libexpr/flake/flakeref.hh b/src/libexpr/flake/flakeref.hh index 9ddc227bb..6b47330a7 100644 --- a/src/libexpr/flake/flakeref.hh +++ b/src/libexpr/flake/flakeref.hh @@ -52,28 +52,28 @@ namespace nix { github:edolstra/dwarffs/unstable github:edolstra/dwarffs/41c0c1bf292ea3ac3858ff393b49ca1123dbd553 - * https:///.git(\?attr(&attr)*)? - ssh:///.git(\?attr(&attr)*)? - git:///.git(\?attr(&attr)*)? + * git+https:///(\?attr(&attr)*)? + git+ssh:///(\?attr(&attr)*)? + git:///(\?attr(&attr)*)? file:///(\?attr(&attr)*)? where 'attr' is one of: rev= ref= - A Git repository fetched through https. Note that the path must - end in ".git". The default for "ref" is "master". + A Git repository fetched through https. The default for "ref" is + "master". Examples: - https://example.org/my/repo.git - https://example.org/my/repo.git?ref=release-1.2.3 - https://example.org/my/repo.git?rev=e72daba8250068216d79d2aeef40d4d95aff6666 + git+https://example.org/my/repo.git + git+https://example.org/my/repo.git?ref=release-1.2.3 + git+https://example.org/my/repo.git?rev=e72daba8250068216d79d2aeef40d4d95aff6666 git://github.com/edolstra/dwarffs.git?ref=flake&rev=2efca4bc9da70fb001b26c3dc858c6397d3c4817 - * /path.git(\?attr(&attr)*)? + * /path(\?attr(&attr)*)? - Like file://path.git, but if no "ref" or "rev" is specified, the + Like file://path, but if no "ref" or "rev" is specified, the (possibly dirty) working tree will be used. Using a working tree is not allowed in pure evaluation mode. 
diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index 931eac95f..48d84c4a1 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -25,6 +25,8 @@ GitInfo exportGit(ref store, std::string uri, { assert(!rev || rev->type == htSHA1); + if (hasPrefix(uri, "git+")) uri = std::string(uri, 4); + bool isLocal = hasPrefix(uri, "/") && pathExists(uri + "/.git"); // If this is a local directory (but not a file:// URI) and no ref From c87840ae14eea84b5910cb0b188ec3fb32cc1466 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 9 Sep 2019 16:34:44 +0200 Subject: [PATCH 259/634] Don't allow arbitrary computations in flake attributes E.g. you can write 'edition = 201909' but not 'edition = 201909 + 0'. Fixes #3075. --- src/libexpr/eval.cc | 23 ++++++++++++++++++----- src/libexpr/eval.hh | 1 + src/libexpr/flake/flake.cc | 36 +++++++++++++++++++++++++++--------- 3 files changed, 46 insertions(+), 14 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index fa79b0d5e..25b50da7e 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -141,12 +141,12 @@ const Value *getPrimOp(const Value &v) { } -string showType(const Value & v) +string showType(ValueType type) { - switch (v.type) { + switch (type) { case tInt: return "an integer"; case tBool: return "a boolean"; - case tString: return v.string.context ? "a string with context" : "a string"; + case tString: return "a string"; case tPath: return "a path"; case tNull: return "null"; case tAttrs: return "a set"; @@ -155,14 +155,27 @@ string showType(const Value & v) case tApp: return "a function application"; case tLambda: return "a function"; case tBlackhole: return "a black hole"; + case tPrimOp: return "a built-in function"; + case tPrimOpApp: return "a partially applied built-in function"; + case tExternal: return "an external value"; + case tFloat: return "a float"; + } + abort(); +} + + +string showType(const Value & v) +{ + switch (v.type) { + case tString: return v.string.context ? "a string with context" : "a string"; case tPrimOp: return fmt("the built-in function '%s'", string(v.primOp->name)); case tPrimOpApp: return fmt("the partially applied built-in function '%s'", string(getPrimOp(v)->primOp->name)); case tExternal: return v.external->showType(); - case tFloat: return "a float"; + default: + return showType(v.type); } - abort(); } diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index 5e976f196..468a826ca 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -338,6 +338,7 @@ private: /* Return a string representing the type of the value `v'. */ +string showType(ValueType type); string showType(const Value & v); /* Decode a context string ‘!!’ into a pair (value.thunk.expr)) || + (type == tLambda && dynamic_cast(value.thunk.expr)))) + state.forceValue(value, pos); + if (value.type != type) + throw Error("expected %s but got %s at %s", + showType(type), showType(value.type), pos); +} + Flake getFlake(EvalState & state, const FlakeRef & flakeRef) { SourceInfo sourceInfo = fetchFlake(state, flakeRef); @@ -219,9 +231,10 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) throw Error("source tree referenced by '%s' does not contain a '%s/flake.nix' file", resolvedRef, resolvedRef.subdir); Value vInfo; + // FIXME: don't evaluate vInfo. 
state.evalFile(realFlakeFile, vInfo); // FIXME: symlink attack - state.forceAttrs(vInfo); + expectType(state, tAttrs, vInfo, Pos(state.symbols.create(realFlakeFile), 0, 0)); auto sEdition = state.symbols.create("edition"); auto sEpoch = state.symbols.create("epoch"); // FIXME: remove soon @@ -231,7 +244,8 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) edition = vInfo.attrs->get(sEpoch); if (edition) { - flake.edition = state.forceInt(*(**edition).value, *(**edition).pos); + expectType(state, tInt, *(**edition).value, *(**edition).pos); + flake.edition = (**edition).value->integer; if (flake.edition > 201909) throw Error("flake '%s' requires unsupported edition %d; please upgrade Nix", flakeRef, flake.edition); if (flake.edition < 201909) @@ -239,26 +253,30 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) } else throw Error("flake '%s' lacks attribute 'edition'", flakeRef); - if (auto description = vInfo.attrs->get(state.sDescription)) - flake.description = state.forceStringNoCtx(*(**description).value, *(**description).pos); + if (auto description = vInfo.attrs->get(state.sDescription)) { + expectType(state, tString, *(**description).value, *(**description).pos); + flake.description = (**description).value->string.s; + } auto sInputs = state.symbols.create("inputs"); auto sUri = state.symbols.create("uri"); auto sFlake = state.symbols.create("flake"); if (std::optional inputs = vInfo.attrs->get(sInputs)) { - state.forceAttrs(*(**inputs).value, *(**inputs).pos); + expectType(state, tAttrs, *(**inputs).value, *(**inputs).pos); for (Attr inputAttr : *(*(**inputs).value).attrs) { - state.forceAttrs(*inputAttr.value, *inputAttr.pos); + expectType(state, tAttrs, *inputAttr.value, *inputAttr.pos); FlakeInput input(FlakeRef(inputAttr.name)); for (Attr attr : *(inputAttr.value->attrs)) { if (attr.name == sUri) { - input.ref = state.forceStringNoCtx(*attr.value, *attr.pos); + expectType(state, tString, *attr.value, *attr.pos); + input.ref = std::string(attr.value->string.s); } else if (attr.name == sFlake) { - input.isFlake = state.forceBool(*attr.value, *attr.pos); + expectType(state, tBool, *attr.value, *attr.pos); + input.isFlake = attr.value->boolean; } else throw Error("flake input '%s' has an unsupported attribute '%s', at %s", inputAttr.name, attr.name, *attr.pos); @@ -271,7 +289,7 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) auto sOutputs = state.symbols.create("outputs"); if (auto outputs = vInfo.attrs->get(sOutputs)) { - state.forceFunction(*(**outputs).value, *(**outputs).pos); + expectType(state, tLambda, *(**outputs).value, *(**outputs).pos); flake.vOutputs = (**outputs).value; if (flake.vOutputs->lambda.fun->matchAttrs) { From f97d3753a13f0ff916d83dbea4fe7dae7194f903 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 9 Sep 2019 17:34:38 +0200 Subject: [PATCH 260/634] Require flake.nix to be an attrset (not a non-trivial thunk) --- src/libexpr/eval.cc | 19 ++++++++++++++++++- src/libexpr/eval.hh | 5 +++-- src/libexpr/flake/flake.cc | 7 ++----- src/libexpr/value.hh | 5 +++++ 4 files changed, 28 insertions(+), 8 deletions(-) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 25b50da7e..ec751ad31 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -179,6 +179,18 @@ string showType(const Value & v) } +bool Value::isTrivial() const +{ + return + type != tApp + && type != tPrimOpApp + && (type != tThunk + || (dynamic_cast(thunk.expr) + && ((ExprAttrs *) thunk.expr)->dynamicAttrs.empty()) + || 
dynamic_cast(thunk.expr)); +} + + #if HAVE_BOEHMGC /* Called when the Boehm GC runs out of memory. */ static void * oomHandler(size_t requested) @@ -749,7 +761,7 @@ Value * ExprPath::maybeThunk(EvalState & state, Env & env) } -void EvalState::evalFile(const Path & path_, Value & v) +void EvalState::evalFile(const Path & path_, Value & v, bool mustBeTrivial) { auto path = checkSourcePath(path_); @@ -778,6 +790,11 @@ void EvalState::evalFile(const Path & path_, Value & v) fileParseCache[path2] = e; try { + // Enforce that 'flake.nix' is a direct attrset, not a + // computation. + if (mustBeTrivial && + !(dynamic_cast(e))) + throw Error("file '%s' must be an attribute set", path); eval(e, v); } catch (Error & e) { addErrorPrefix(e, "while evaluating the file '%1%':\n", path2); diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index 468a826ca..16350a5bf 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -157,8 +157,9 @@ public: Expr * parseStdin(); /* Evaluate an expression read from the given file to normal - form. */ - void evalFile(const Path & path, Value & v); + form. Optionally enforce that the top-level expression is + trivial (i.e. doesn't require arbitrary computation). */ + void evalFile(const Path & path, Value & v, bool mustBeTrivial = false); void resetFileCache(); diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index c10906731..050e65259 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -198,9 +198,7 @@ static SourceInfo fetchFlake(EvalState & state, const FlakeRef & resolvedRef) static void expectType(EvalState & state, ValueType type, Value & value, const Pos & pos) { - if (value.type == tThunk && - ((type == tAttrs && dynamic_cast(value.thunk.expr)) || - (type == tLambda && dynamic_cast(value.thunk.expr)))) + if (value.type == tThunk && value.isTrivial()) state.forceValue(value, pos); if (value.type != type) throw Error("expected %s but got %s at %s", @@ -231,8 +229,7 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) throw Error("source tree referenced by '%s' does not contain a '%s/flake.nix' file", resolvedRef, resolvedRef.subdir); Value vInfo; - // FIXME: don't evaluate vInfo. - state.evalFile(realFlakeFile, vInfo); // FIXME: symlink attack + state.evalFile(realFlakeFile, vInfo, true); // FIXME: symlink attack expectType(state, tAttrs, vInfo, Pos(state.symbols.create(realFlakeFile), 0, 0)); diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh index e1ec87d3b..bdf2cdde1 100644 --- a/src/libexpr/value.hh +++ b/src/libexpr/value.hh @@ -170,6 +170,11 @@ struct Value { return type == tList1 ? 1 : type == tList2 ? 2 : bigList.size; } + + /* Check whether forcing this value requires a trivial amount of + computation. In particular, function applications are + non-trivial. 
*/ + bool isTrivial() const; }; From dc3f52a1447df8523f44c89e25e48e8b7f5341a0 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 10 Sep 2019 14:52:22 +0200 Subject: [PATCH 261/634] nix flake check: Check overlays --- src/nix/flake.cc | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 4129ef323..10ce9addc 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -280,6 +280,22 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON } }; + auto checkOverlay = [&](const std::string & attrPath, Value & v) { + try { + state->forceValue(v); + if (v.type != tLambda || v.lambda.fun->matchAttrs || std::string(v.lambda.fun->arg) != "final") + throw Error("overlay does not take an argument named 'final'"); + auto body = dynamic_cast(v.lambda.fun->body); + if (!body || body->matchAttrs || std::string(body->arg) != "prev") + throw Error("overlay does not take an argument named 'prev'"); + // FIXME: if we have a 'nixpkgs' input, use it to + // evaluate the overlay. + } catch (Error & e) { + e.addPrefix(fmt("while checking the overlay '" ANSI_BOLD "%s" ANSI_NORMAL "':\n", attrPath)); + throw; + } + }; + { Activity act(*logger, lvlInfo, actUnknown, "evaluating flake"); @@ -326,6 +342,9 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON // FIXME: do getDerivations? ; + else if (name == "overlay") + checkOverlay(name, vProvide); + else warn("unknown flake output '%s'", name); From 4b9dee6bcca48bd60f341cb07273a33e632bafc2 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 10 Sep 2019 15:25:10 +0200 Subject: [PATCH 262/634] nix flake check: Do some basic checks on NixOS modules Also show more position info. --- src/nix/flake.cc | 87 +++++++++++++++++++++++++++++++++++------------- 1 file changed, 64 insertions(+), 23 deletions(-) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 10ce9addc..b29aa212c 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -170,7 +170,7 @@ struct CmdFlakeUpdate : FlakeCommand }; static void enumerateOutputs(EvalState & state, Value & vFlake, - std::function callback) + std::function callback) { state.forceAttrs(vFlake); @@ -179,7 +179,7 @@ static void enumerateOutputs(EvalState & state, Value & vFlake, state.forceAttrs(*vOutputs); for (auto & attr : *vOutputs->attrs) - callback(attr.name, *attr.value); + callback(attr.name, *attr.value, *attr.pos); } struct CmdFlakeInfo : FlakeCommand, MixJSON @@ -207,7 +207,7 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON enumerateOutputs(*state, *vFlake, - [&](const std::string & name, Value & vProvide) { + [&](const std::string & name, Value & vProvide, const Pos & pos) { auto provide = nlohmann::json::object(); if (name == "checks" || name == "packages") { @@ -251,7 +251,7 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON auto state = getEvalState(); auto flake = resolveFlake(); - auto checkDerivation = [&](const std::string & attrPath, Value & v) { + auto checkDerivation = [&](const std::string & attrPath, Value & v, const Pos & pos) { try { auto drvInfo = getDerivation(*state, v, false); if (!drvInfo) @@ -259,14 +259,14 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON // FIXME: check meta attributes return drvInfo->queryDrvPath(); } catch (Error & e) { - e.addPrefix(fmt("while checking the derivation '" ANSI_BOLD "%s" ANSI_NORMAL "':\n", attrPath)); + e.addPrefix(fmt("while checking the derivation '" ANSI_BOLD "%s" ANSI_NORMAL "' at %s:\n", attrPath, pos)); throw; } }; PathSet drvPaths; - auto checkApp = [&](const std::string & attrPath, Value & v) { + auto checkApp = [&](const 
std::string & attrPath, Value & v, const Pos & pos) { try { auto app = App(*state, v); for (auto & i : app.context) { @@ -275,12 +275,12 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON drvPaths.insert(drvPath + "!" + outputName); } } catch (Error & e) { - e.addPrefix(fmt("while checking the app definition '" ANSI_BOLD "%s" ANSI_NORMAL "':\n", attrPath)); + e.addPrefix(fmt("while checking the app definition '" ANSI_BOLD "%s" ANSI_NORMAL "' at %s:\n", attrPath, pos)); throw; } }; - auto checkOverlay = [&](const std::string & attrPath, Value & v) { + auto checkOverlay = [&](const std::string & attrPath, Value & v, const Pos & pos) { try { state->forceValue(v); if (v.type != tLambda || v.lambda.fun->matchAttrs || std::string(v.lambda.fun->arg) != "final") @@ -291,7 +291,31 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON // FIXME: if we have a 'nixpkgs' input, use it to // evaluate the overlay. } catch (Error & e) { - e.addPrefix(fmt("while checking the overlay '" ANSI_BOLD "%s" ANSI_NORMAL "':\n", attrPath)); + e.addPrefix(fmt("while checking the overlay '" ANSI_BOLD "%s" ANSI_NORMAL "' at %s:\n", attrPath, pos)); + throw; + } + }; + + auto checkModule = [&](const std::string & attrPath, Value & v, const Pos & pos) { + try { + state->forceValue(v); + if (v.type == tLambda) { + if (!v.lambda.fun->matchAttrs || !v.lambda.fun->formals->ellipsis) + throw Error("module must match an open attribute set ('{ config, ... }')"); + } else if (v.type == tAttrs) { + for (auto & attr : *v.attrs) + try { + state->forceValue(*attr.value); + } catch (Error & e) { + e.addPrefix(fmt("while evaluating the option '" ANSI_BOLD "%s" ANSI_NORMAL "' at %s:\n", attr.name, *attr.pos)); + throw; + } + } else + throw Error("module must be a function or an attribute set"); + // FIXME: if we have a 'nixpkgs' input, use it to + // check the module. + } catch (Error & e) { + e.addPrefix(fmt("while checking the NixOS module '" ANSI_BOLD "%s" ANSI_NORMAL "' at %s:\n", attrPath, pos)); throw; } }; @@ -304,46 +328,63 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON enumerateOutputs(*state, *vFlake, - [&](const std::string & name, Value & vProvide) { + [&](const std::string & name, Value & vOutput, const Pos & pos) { Activity act(*logger, lvlChatty, actUnknown, fmt("checking flake output '%s'", name)); try { - state->forceValue(vProvide); + state->forceValue(vOutput); if (name == "checks") { - state->forceAttrs(vProvide); - for (auto & aCheck : *vProvide.attrs) + state->forceAttrs(vOutput); + for (auto & attr : *vOutput.attrs) drvPaths.insert(checkDerivation( - name + "." + (std::string) aCheck.name, *aCheck.value)); + name + "." + (std::string) attr.name, *attr.value, *attr.pos)); } else if (name == "packages") { - state->forceAttrs(vProvide); - for (auto & aCheck : *vProvide.attrs) + state->forceAttrs(vOutput); + for (auto & attr : *vOutput.attrs) checkDerivation( - name + "." + (std::string) aCheck.name, *aCheck.value); + name + "." + (std::string) attr.name, *attr.value, *attr.pos); } else if (name == "apps") { - state->forceAttrs(vProvide); - for (auto & aCheck : *vProvide.attrs) + state->forceAttrs(vOutput); + for (auto & attr : *vOutput.attrs) checkApp( - name + "." + (std::string) aCheck.name, *aCheck.value); + name + "." 
+ (std::string) attr.name, *attr.value, *attr.pos); } else if (name == "defaultPackage" || name == "devShell") - checkDerivation(name, vProvide); + checkDerivation(name, vOutput, pos); else if (name == "defaultApp") - checkApp(name, vProvide); + checkApp(name, vOutput, pos); else if (name == "legacyPackages") // FIXME: do getDerivations? ; else if (name == "overlay") - checkOverlay(name, vProvide); + checkOverlay(name, vOutput, pos); + + else if (name == "overlays") { + state->forceAttrs(vOutput); + for (auto & attr : *vOutput.attrs) + checkOverlay(name + "." + (std::string) attr.name, + *attr.value, *attr.pos); + } + + else if (name == "nixosModule") + checkModule(name, vOutput, pos); + + else if (name == "nixosModules") { + state->forceAttrs(vOutput); + for (auto & attr : *vOutput.attrs) + checkModule(name + "." + (std::string) attr.name, + *attr.value, *attr.pos); + } else warn("unknown flake output '%s'", name); From f3f854dac15d5e5318cd0398fd50a8d8db4f6fd0 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 10 Sep 2019 16:03:03 +0200 Subject: [PATCH 263/634] nix flake check: Add some tests --- tests/flakes.sh | 85 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) diff --git a/tests/flakes.sh b/tests/flakes.sh index 8594a4c55..7f41d5d7e 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -344,3 +344,88 @@ nix flake --flake-registry $registry check $flake3Dir rm -rf $TEST_ROOT/flake1-v2 nix flake clone --flake-registry $registry flake1 $TEST_ROOT/flake1-v2 + +# More 'nix flake check' tests. +cat > $flake3Dir/flake.nix < $flake3Dir/flake.nix < $flake3Dir/flake.nix < $flake3Dir/flake.nix < $flake3Dir/flake.nix < $flake3Dir/flake.nix < Date: Tue, 10 Sep 2019 16:06:43 +0200 Subject: [PATCH 264/634] Test quoted attrpaths Issue #3076. 
--- tests/flakes.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/flakes.sh b/tests/flakes.sh index 7f41d5d7e..f3baf8cef 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -183,7 +183,7 @@ cat > $flake3Dir/flake.nix < Date: Tue, 10 Sep 2019 17:39:34 +0200 Subject: [PATCH 265/634] flake.nix: Remove VM tests from 'checks' --- flake.nix | 2 -- 1 file changed, 2 deletions(-) diff --git a/flake.nix b/flake.nix index 06a5515c5..f0d928494 100644 --- a/flake.nix +++ b/flake.nix @@ -13,8 +13,6 @@ checks = { binaryTarball = hydraJobs.binaryTarball.x86_64-linux; perlBindings = hydraJobs.perlBindings.x86_64-linux; - inherit (hydraJobs.tests) remoteBuilds nix-copy-closure; - setuid = hydraJobs.tests.setuid.x86_64-linux; }; packages = { From 55e55b34e6ea770ef1310dffb96f476bf37d460c Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 10 Sep 2019 17:39:55 +0200 Subject: [PATCH 266/634] nix flake check: Check hydraJobs --- src/nix/flake.cc | 45 +++++++++++++++++++++++++++++++++++---------- 1 file changed, 35 insertions(+), 10 deletions(-) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index b29aa212c..5fd3f5508 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -211,7 +211,7 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON auto provide = nlohmann::json::object(); if (name == "checks" || name == "packages") { - state->forceAttrs(vProvide); + state->forceAttrs(vProvide, pos); for (auto & aCheck : *vProvide.attrs) provide[aCheck.name] = nlohmann::json::object(); } @@ -282,7 +282,7 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON auto checkOverlay = [&](const std::string & attrPath, Value & v, const Pos & pos) { try { - state->forceValue(v); + state->forceValue(v, pos); if (v.type != tLambda || v.lambda.fun->matchAttrs || std::string(v.lambda.fun->arg) != "final") throw Error("overlay does not take an argument named 'final'"); auto body = dynamic_cast(v.lambda.fun->body); @@ -298,14 +298,14 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON auto checkModule = [&](const std::string & attrPath, Value & v, const Pos & pos) { try { - state->forceValue(v); + state->forceValue(v, pos); if (v.type == tLambda) { if (!v.lambda.fun->matchAttrs || !v.lambda.fun->formals->ellipsis) throw Error("module must match an open attribute set ('{ config, ... }')"); } else if (v.type == tAttrs) { for (auto & attr : *v.attrs) try { - state->forceValue(*attr.value); + state->forceValue(*attr.value, *attr.pos); } catch (Error & e) { e.addPrefix(fmt("while evaluating the option '" ANSI_BOLD "%s" ANSI_NORMAL "' at %s:\n", attr.name, *attr.pos)); throw; @@ -320,6 +320,28 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON } }; + std::function checkHydraJobs; + + checkHydraJobs = [&](const std::string & attrPath, Value & v, const Pos & pos) { + try { + state->forceAttrs(v, pos); + + if (state->isDerivation(v)) + throw Error("jobset should not be a derivation at top-level"); + + for (auto & attr : *v.attrs) { + state->forceAttrs(*attr.value, *attr.pos); + if (!state->isDerivation(*attr.value)) + checkHydraJobs(attrPath + "." 
+ (std::string) attr.name, + *attr.value, *attr.pos); + } + + } catch (Error & e) { + e.addPrefix(fmt("while checking the Hydra jobset '" ANSI_BOLD "%s" ANSI_NORMAL "' at %s:\n", attrPath, pos)); + throw; + } + }; + { Activity act(*logger, lvlInfo, actUnknown, "evaluating flake"); @@ -333,24 +355,24 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON fmt("checking flake output '%s'", name)); try { - state->forceValue(vOutput); + state->forceValue(vOutput, pos); if (name == "checks") { - state->forceAttrs(vOutput); + state->forceAttrs(vOutput, pos); for (auto & attr : *vOutput.attrs) drvPaths.insert(checkDerivation( name + "." + (std::string) attr.name, *attr.value, *attr.pos)); } else if (name == "packages") { - state->forceAttrs(vOutput); + state->forceAttrs(vOutput, pos); for (auto & attr : *vOutput.attrs) checkDerivation( name + "." + (std::string) attr.name, *attr.value, *attr.pos); } else if (name == "apps") { - state->forceAttrs(vOutput); + state->forceAttrs(vOutput, pos); for (auto & attr : *vOutput.attrs) checkApp( name + "." + (std::string) attr.name, *attr.value, *attr.pos); @@ -370,7 +392,7 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON checkOverlay(name, vOutput, pos); else if (name == "overlays") { - state->forceAttrs(vOutput); + state->forceAttrs(vOutput, pos); for (auto & attr : *vOutput.attrs) checkOverlay(name + "." + (std::string) attr.name, *attr.value, *attr.pos); @@ -380,12 +402,15 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON checkModule(name, vOutput, pos); else if (name == "nixosModules") { - state->forceAttrs(vOutput); + state->forceAttrs(vOutput, pos); for (auto & attr : *vOutput.attrs) checkModule(name + "." + (std::string) attr.name, *attr.value, *attr.pos); } + else if (name == "hydraJobs") + checkHydraJobs(name, vOutput, pos); + else warn("unknown flake output '%s'", name); From c67407172d8383394f4962ad177c84bf04529e5e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 18 Sep 2019 21:17:27 +0200 Subject: [PATCH 267/634] Record original flakerefs in the lock file again If 'input..uri' changes, then the entry in the lockfile for input should be considered stale. Also print some messages when lock file entries are added/updated. 
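For illustration, a hedged sketch (not taken from this patch) of a minimal version-3 lock file after this change; the hash and revision are placeholders. "originalUri" records the flakeref exactly as declared by the referring flake, so the entry is treated as stale as soon as the declared uri no longer matches it, while "uri" holds the locked immutable ref and "narHash" the content hash of the locked source tree.

    {
      "inputs": {
        "nixpkgs": {
          "inputs": {},
          "narHash": "sha256-<hash of the locked source tree>",
          "originalUri": "nixpkgs",
          "uri": "github:edolstra/nixpkgs/<locked revision>"
        }
      },
      "version": 3
    }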
--- flake.lock | 7 +++--- src/libexpr/flake/flake.cc | 43 ++++++++++++++++++++++++----------- src/libexpr/flake/flake.hh | 2 +- src/libexpr/flake/lockfile.cc | 6 +++-- src/libexpr/flake/lockfile.hh | 6 ++--- src/nix/flake.cc | 7 +++--- 6 files changed, 45 insertions(+), 26 deletions(-) diff --git a/flake.lock b/flake.lock index fa5649c03..70a433b26 100644 --- a/flake.lock +++ b/flake.lock @@ -2,9 +2,10 @@ "inputs": { "nixpkgs": { "inputs": {}, - "narHash": "sha256-TrLhI3xPkTTznE9gcMHhkHirGNN7N02zM4DxJ/U3WRs=", - "uri": "github:edolstra/nixpkgs/24bf27fc215e8300877dfa1c426b9966bbfbd150" + "narHash": "sha256-HGlE2VNbdEjCP76hWAS72kHBlMWhpvqWo58Obg1Vy6s=", + "originalUri": "nixpkgs", + "uri": "github:edolstra/nixpkgs/13e1bce51f4aebdf3db58ce8c4a93e904a272bff" } }, - "version": 2 + "version": 3 } diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index 050e65259..accdb4194 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -205,8 +205,10 @@ static void expectType(EvalState & state, ValueType type, showType(type), showType(value.type), pos); } -Flake getFlake(EvalState & state, const FlakeRef & flakeRef) +Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool allowLookup) { + auto flakeRef = maybeLookupFlake(state, originalRef, allowLookup); + SourceInfo sourceInfo = fetchFlake(state, flakeRef); debug("got flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string()); @@ -223,7 +225,7 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef) if (!isInDir(realFlakeFile, state.store->toRealPath(sourceInfo.storePath))) throw Error("'flake.nix' file of flake '%s' escapes from '%s'", resolvedRef, sourceInfo.storePath); - Flake flake(flakeRef, sourceInfo); + Flake flake(originalRef, sourceInfo); if (!pathExists(realFlakeFile)) throw Error("source tree referenced by '%s' does not contain a '%s/flake.nix' file", resolvedRef, resolvedRef.subdir); @@ -358,6 +360,7 @@ bool allowedToUseRegistries(HandleLockFile handle, bool isTopRef) Note that this is lazy: we only recursively fetch inputs that are not in the lockfile yet. */ static std::pair updateLocks( + const std::string & inputPath, EvalState & state, const Flake & flake, HandleLockFile handleLockFile, @@ -366,23 +369,36 @@ static std::pair updateLocks( { LockedInput newEntry( flake.sourceInfo.resolvedRef, + flake.originalRef, flake.sourceInfo.narHash); for (auto & [id, input] : flake.inputs) { + auto inputPath2 = (inputPath.empty() ? 
"" : inputPath + "/") + id; auto i = oldEntry.inputs.find(id); - if (i != oldEntry.inputs.end()) { + if (i != oldEntry.inputs.end() && i->second.originalRef == input.ref) { newEntry.inputs.insert_or_assign(id, i->second); } else { if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries) throw Error("cannot update flake input '%s' in pure mode", id); - if (input.isFlake) + if (input.isFlake) { + auto actualInput = getFlake(state, input.ref, + allowedToUseRegistries(handleLockFile, false)); + if (i == oldEntry.inputs.end()) + printMsg(lvlWarn, "mapped flake input '%s' to '%s'", + inputPath2, actualInput.sourceInfo.resolvedRef); + else + printMsg(lvlWarn, "updated flake input '%s' from '%s' to '%s'", + inputPath2, i->second.originalRef, actualInput.sourceInfo.resolvedRef); newEntry.inputs.insert_or_assign(id, - updateLocks(state, - getFlake(state, maybeLookupFlake(state, input.ref, allowedToUseRegistries(handleLockFile, false))), - handleLockFile, {}, false).second); - else { - auto sourceInfo = getNonFlake(state, maybeLookupFlake(state, input.ref, allowedToUseRegistries(handleLockFile, false))); - newEntry.inputs.insert_or_assign(id, LockedInput(sourceInfo.resolvedRef, sourceInfo.narHash)); + updateLocks(inputPath2, state, actualInput, handleLockFile, {}, false).second); + } else { + auto sourceInfo = getNonFlake(state, + maybeLookupFlake(state, input.ref, + allowedToUseRegistries(handleLockFile, false))); + printMsg(lvlWarn, "mapped flake input '%s' to '%s'", + inputPath2, sourceInfo.resolvedRef); + newEntry.inputs.insert_or_assign(id, + LockedInput(sourceInfo.resolvedRef, input.ref, sourceInfo.narHash)); } } } @@ -394,7 +410,8 @@ static std::pair updateLocks( and optionally write it to file, it the flake is writable. */ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLockFile handleLockFile) { - auto flake = getFlake(state, maybeLookupFlake(state, topRef, allowedToUseRegistries(handleLockFile, true))); + auto flake = getFlake(state, topRef, + allowedToUseRegistries(handleLockFile, true)); LockFile oldLockFile; @@ -407,7 +424,7 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLoc } LockFile lockFile(updateLocks( - state, flake, handleLockFile, oldLockFile, true).second); + "", state, flake, handleLockFile, oldLockFile, true).second); if (!(lockFile == oldLockFile)) { if (allowedToWrite(handleLockFile)) { @@ -476,7 +493,7 @@ static void prim_callFlake(EvalState & state, const Pos & pos, Value * * args, V assert(lazyInput->lockedInput.ref.isImmutable()); if (lazyInput->isFlake) { - auto flake = getFlake(state, lazyInput->lockedInput.ref); + auto flake = getFlake(state, lazyInput->lockedInput.ref, false); if (flake.sourceInfo.narHash != lazyInput->lockedInput.narHash) throw Error("the content hash of flake '%s' doesn't match the hash recorded in the referring lockfile", flake.sourceInfo.resolvedRef); diff --git a/src/libexpr/flake/flake.hh b/src/libexpr/flake/flake.hh index 8f05e9799..b366e650b 100644 --- a/src/libexpr/flake/flake.hh +++ b/src/libexpr/flake/flake.hh @@ -78,7 +78,7 @@ struct Flake : originalRef(origRef), sourceInfo(sourceInfo) {}; }; -Flake getFlake(EvalState &, const FlakeRef &); +Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool allowLookup); /* If 'allowLookup' is true, then resolve 'flakeRef' using the registries. 
*/ diff --git a/src/libexpr/flake/lockfile.cc b/src/libexpr/flake/lockfile.cc index f32d752f9..039b7a7c1 100644 --- a/src/libexpr/flake/lockfile.cc +++ b/src/libexpr/flake/lockfile.cc @@ -6,6 +6,7 @@ namespace nix::flake { LockedInput::LockedInput(const nlohmann::json & json) : LockedInputs(json) , ref(json["uri"]) + , originalRef(json["originalUri"]) , narHash(Hash((std::string) json["narHash"])) { if (!ref.isImmutable()) @@ -16,6 +17,7 @@ nlohmann::json LockedInput::toJson() const { auto json = LockedInputs::toJson(); json["uri"] = ref.to_string(); + json["originalUri"] = originalRef.to_string(); json["narHash"] = narHash.to_string(SRI); return json; } @@ -54,7 +56,7 @@ bool LockedInputs::isDirty() const nlohmann::json LockFile::toJson() const { auto json = LockedInputs::toJson(); - json["version"] = 2; + json["version"] = 3; return json; } @@ -64,7 +66,7 @@ LockFile LockFile::read(const Path & path) auto json = nlohmann::json::parse(readFile(path)); auto version = json.value("version", 0); - if (version != 2) + if (version != 3) throw Error("lock file '%s' has unsupported version %d", path, version); return LockFile(json); diff --git a/src/libexpr/flake/lockfile.hh b/src/libexpr/flake/lockfile.hh index 084eabc1a..ab81eac8b 100644 --- a/src/libexpr/flake/lockfile.hh +++ b/src/libexpr/flake/lockfile.hh @@ -30,11 +30,11 @@ struct LockedInputs /* Lock file information about a flake input. */ struct LockedInput : LockedInputs { - FlakeRef ref; + FlakeRef ref, originalRef; Hash narHash; - LockedInput(const FlakeRef & ref, const Hash & narHash) - : ref(ref), narHash(narHash) + LockedInput(const FlakeRef & ref, const FlakeRef & originalRef, const Hash & narHash) + : ref(ref), originalRef(originalRef), narHash(narHash) { assert(ref.isImmutable()); }; diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 5fd3f5508..599bf12eb 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -38,8 +38,7 @@ public: Flake getFlake() { auto evalState = getEvalState(); - return flake::getFlake(*evalState, - maybeLookupFlake(*evalState, getFlakeRef(), useRegistries)); + return flake::getFlake(*evalState, getFlakeRef(), useRegistries); } ResolvedFlake resolveFlake() @@ -500,13 +499,13 @@ struct CmdFlakePin : virtual Args, EvalCommand FlakeRegistry userRegistry = *readRegistry(userRegistryPath); auto it = userRegistry.entries.find(FlakeRef(alias)); if (it != userRegistry.entries.end()) { - it->second = getFlake(*evalState, maybeLookupFlake(*evalState, it->second, true)).sourceInfo.resolvedRef; + it->second = getFlake(*evalState, it->second, true).sourceInfo.resolvedRef; writeRegistry(userRegistry, userRegistryPath); } else { std::shared_ptr globalReg = evalState->getGlobalFlakeRegistry(); it = globalReg->entries.find(FlakeRef(alias)); if (it != globalReg->entries.end()) { - auto newRef = getFlake(*evalState, maybeLookupFlake(*evalState, it->second, true)).sourceInfo.resolvedRef; + auto newRef = getFlake(*evalState, it->second, true).sourceInfo.resolvedRef; userRegistry.entries.insert_or_assign(alias, newRef); writeRegistry(userRegistry, userRegistryPath); } else From aeb7148afd56b228604b79373a45793d36d660a3 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 18 Sep 2019 23:59:45 +0200 Subject: [PATCH 268/634] Some effort to minimize flake dependencies For example, if the top-level flake depends on "nixpkgs/release-19.03", and one of its dependencies depends on "nixpkgs", then the latter will be mapped to "nixpkgs/release-19.03", rather than whatever the default branch of "nixpkgs" is. 
Thus you get only one "nixpkgs" dependency rather than two. This currently only works in a breadth-first way, so the other way around (i.e. if the top-level flake depends on "nixpkgs", and a dependency depends on "nixpkgs/release-19.03") still results in two "nixpkgs" dependencies. --- src/libexpr/flake/flake.cc | 90 +++++++++++++++++++++++++++-------- src/libexpr/flake/flake.hh | 7 --- src/libexpr/flake/flakeref.cc | 17 +++++++ src/libexpr/flake/flakeref.hh | 6 +++ 4 files changed, 94 insertions(+), 26 deletions(-) diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index accdb4194..d9bbf85c2 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -112,7 +112,9 @@ static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const return flakeRef; } -FlakeRef maybeLookupFlake( +/* If 'allowLookup' is true, then resolve 'flakeRef' using the + registries. */ +static FlakeRef maybeLookupFlake( EvalState & state, const FlakeRef & flakeRef, bool allowLookup) @@ -126,6 +128,23 @@ FlakeRef maybeLookupFlake( return flakeRef; } +typedef std::vector> RefMap; + +static FlakeRef lookupInRefMap( + const RefMap & refMap, + const FlakeRef & flakeRef) +{ + // FIXME: inefficient. + for (auto & i : refMap) { + if (flakeRef.contains(i.first)) { + debug("mapping '%s' to previously seen input '%s' -> '%s", + flakeRef, i.first, i.second); + return i.second; + } + } + + return flakeRef; +} static SourceInfo fetchFlake(EvalState & state, const FlakeRef & resolvedRef) { @@ -205,15 +224,21 @@ static void expectType(EvalState & state, ValueType type, showType(type), showType(value.type), pos); } -Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool allowLookup) +static Flake getFlake(EvalState & state, const FlakeRef & originalRef, + bool allowLookup, RefMap & refMap) { - auto flakeRef = maybeLookupFlake(state, originalRef, allowLookup); + auto flakeRef = lookupInRefMap(refMap, + maybeLookupFlake(state, + lookupInRefMap(refMap, originalRef), allowLookup)); SourceInfo sourceInfo = fetchFlake(state, flakeRef); debug("got flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string()); FlakeRef resolvedRef = sourceInfo.resolvedRef; + refMap.push_back({originalRef, resolvedRef}); + refMap.push_back({flakeRef, resolvedRef}); + state.store->assertStorePath(sourceInfo.storePath); if (state.allowedPaths) @@ -314,13 +339,27 @@ Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool allowLookup return flake; } -static SourceInfo getNonFlake(EvalState & state, const FlakeRef & flakeRef) +Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool allowLookup) { + RefMap refMap; + return getFlake(state, originalRef, allowLookup, refMap); +} + +static SourceInfo getNonFlake(EvalState & state, const FlakeRef & originalRef, + bool allowLookup, RefMap & refMap) +{ + auto flakeRef = lookupInRefMap(refMap, + maybeLookupFlake(state, + lookupInRefMap(refMap, originalRef), allowLookup)); + auto sourceInfo = fetchFlake(state, flakeRef); debug("got non-flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string()); FlakeRef resolvedRef = sourceInfo.resolvedRef; + refMap.push_back({originalRef, resolvedRef}); + refMap.push_back({flakeRef, resolvedRef}); + state.store->assertStorePath(sourceInfo.storePath); if (state.allowedPaths) @@ -360,6 +399,7 @@ bool allowedToUseRegistries(HandleLockFile handle, bool isTopRef) Note that this is lazy: we only recursively fetch inputs that are not in the 
lockfile yet. */ static std::pair updateLocks( + RefMap & refMap, const std::string & inputPath, EvalState & state, const Flake & flake, @@ -372,6 +412,8 @@ static std::pair updateLocks( flake.originalRef, flake.sourceInfo.narHash); + std::vector> postponed; + for (auto & [id, input] : flake.inputs) { auto inputPath2 = (inputPath.empty() ? "" : inputPath + "/") + id; auto i = oldEntry.inputs.find(id); @@ -380,29 +422,36 @@ static std::pair updateLocks( } else { if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries) throw Error("cannot update flake input '%s' in pure mode", id); - if (input.isFlake) { - auto actualInput = getFlake(state, input.ref, - allowedToUseRegistries(handleLockFile, false)); + + auto warn = [&](const SourceInfo & sourceInfo) { if (i == oldEntry.inputs.end()) - printMsg(lvlWarn, "mapped flake input '%s' to '%s'", - inputPath2, actualInput.sourceInfo.resolvedRef); + printInfo("mapped flake input '%s' to '%s'", + inputPath2, sourceInfo.resolvedRef); else printMsg(lvlWarn, "updated flake input '%s' from '%s' to '%s'", - inputPath2, i->second.originalRef, actualInput.sourceInfo.resolvedRef); - newEntry.inputs.insert_or_assign(id, - updateLocks(inputPath2, state, actualInput, handleLockFile, {}, false).second); + inputPath2, i->second.originalRef, sourceInfo.resolvedRef); + }; + + if (input.isFlake) { + auto actualInput = getFlake(state, input.ref, + allowedToUseRegistries(handleLockFile, false), refMap); + warn(actualInput.sourceInfo); + postponed.push_back([&, id{id}, inputPath2, actualInput]() { + newEntry.inputs.insert_or_assign(id, + updateLocks(refMap, inputPath2, state, actualInput, handleLockFile, {}, false).second); + }); } else { - auto sourceInfo = getNonFlake(state, - maybeLookupFlake(state, input.ref, - allowedToUseRegistries(handleLockFile, false))); - printMsg(lvlWarn, "mapped flake input '%s' to '%s'", - inputPath2, sourceInfo.resolvedRef); + auto sourceInfo = getNonFlake(state, input.ref, + allowedToUseRegistries(handleLockFile, false), refMap); + warn(sourceInfo); newEntry.inputs.insert_or_assign(id, LockedInput(sourceInfo.resolvedRef, input.ref, sourceInfo.narHash)); } } } + for (auto & f : postponed) f(); + return {flake, newEntry}; } @@ -423,8 +472,10 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLoc + "/" + flake.sourceInfo.resolvedRef.subdir + "/flake.lock"); } + RefMap refMap; + LockFile lockFile(updateLocks( - "", state, flake, handleLockFile, oldLockFile, true).second); + refMap, "", state, flake, handleLockFile, oldLockFile, true).second); if (!(lockFile == oldLockFile)) { if (allowedToWrite(handleLockFile)) { @@ -500,7 +551,8 @@ static void prim_callFlake(EvalState & state, const Pos & pos, Value * * args, V callFlake(state, flake, lazyInput->lockedInput, v); } else { - auto sourceInfo = getNonFlake(state, lazyInput->lockedInput.ref); + RefMap refMap; + auto sourceInfo = getNonFlake(state, lazyInput->lockedInput.ref, false, refMap); if (sourceInfo.narHash != lazyInput->lockedInput.narHash) throw Error("the content hash of repository '%s' doesn't match the hash recorded in the referring lockfile", sourceInfo.resolvedRef); diff --git a/src/libexpr/flake/flake.hh b/src/libexpr/flake/flake.hh index b366e650b..63d848889 100644 --- a/src/libexpr/flake/flake.hh +++ b/src/libexpr/flake/flake.hh @@ -80,13 +80,6 @@ struct Flake Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool allowLookup); -/* If 'allowLookup' is true, then resolve 'flakeRef' using the - registries. 
*/ -FlakeRef maybeLookupFlake( - EvalState & state, - const FlakeRef & flakeRef, - bool allowLookup); - /* Fingerprint of a locked flake; used as a cache key. */ typedef Hash Fingerprint; diff --git a/src/libexpr/flake/flakeref.cc b/src/libexpr/flake/flakeref.cc index 4ce326c0b..4930d03ce 100644 --- a/src/libexpr/flake/flakeref.cc +++ b/src/libexpr/flake/flakeref.cc @@ -255,6 +255,23 @@ FlakeRef FlakeRef::baseRef() const // Removes the ref and rev from a FlakeRef. return result; } +bool FlakeRef::contains(const FlakeRef & other) const +{ + if (!(data == other.data)) + return false; + + if (ref && ref != other.ref) + return false; + + if (rev && rev != other.rev) + return false; + + if (subdir != other.subdir) + return false; + + return true; +} + std::optional parseFlakeRef( const std::string & uri, bool allowRelative) { diff --git a/src/libexpr/flake/flakeref.hh b/src/libexpr/flake/flakeref.hh index 6b47330a7..39e019dbd 100644 --- a/src/libexpr/flake/flakeref.hh +++ b/src/libexpr/flake/flakeref.hh @@ -182,6 +182,12 @@ struct FlakeRef return std::get_if(&data) && rev == Hash(rev->type); } + + /* Return true if 'other' is not less specific than 'this'. For + example, 'nixpkgs' contains 'nixpkgs/release-19.03', and both + 'nixpkgs' and 'nixpkgs/release-19.03' contain + 'nixpkgs/release-19.03/'. */ + bool contains(const FlakeRef & other) const; }; std::ostream & operator << (std::ostream & str, const FlakeRef & flakeRef); From 5573365dffc96df5fcb6898077aeff1cd5e2b711 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 19 Sep 2019 20:15:42 +0200 Subject: [PATCH 269/634] nix flake check: Validate nixosConfigurations outputs --- src/nix/flake.cc | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 599bf12eb..2e352306e 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -8,6 +8,7 @@ #include "get-drvs.hh" #include "store-api.hh" #include "derivations.hh" +#include "attr-path.hh" #include #include @@ -341,6 +342,21 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON } }; + auto checkNixOSConfiguration = [&](const std::string & attrPath, Value & v, const Pos & pos) { + try { + Activity act(*logger, lvlChatty, actUnknown, + fmt("checking NixOS configuration '%s'", attrPath)); + Bindings & bindings(*state->allocBindings(0)); + auto vToplevel = findAlongAttrPath(*state, "config.system.build.toplevel", bindings, v); + state->forceAttrs(*vToplevel, pos); + if (!state->isDerivation(*vToplevel)) + throw Error("attribute 'config.system.build.toplevel' is not a derivation"); + } catch (Error & e) { + e.addPrefix(fmt("while checking the NixOS configuration '" ANSI_BOLD "%s" ANSI_NORMAL "' at %s:\n", attrPath, pos)); + throw; + } + }; + { Activity act(*logger, lvlInfo, actUnknown, "evaluating flake"); @@ -407,6 +423,13 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON *attr.value, *attr.pos); } + else if (name == "nixosConfigurations") { + state->forceAttrs(vOutput, pos); + for (auto & attr : *vOutput.attrs) + checkNixOSConfiguration(name + "." 
+ (std::string) attr.name, + *attr.value, *attr.pos); + } + else if (name == "hydraJobs") checkHydraJobs(name, vOutput, pos); From 99e8e58f2de9941353b47ed14fbe4ed76d635519 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 20 Sep 2019 13:48:53 +0200 Subject: [PATCH 270/634] Shut up some warnings --- src/libstore/download.hh | 2 ++ src/libstore/fs-accessor.hh | 2 ++ 2 files changed, 4 insertions(+) diff --git a/src/libstore/download.hh b/src/libstore/download.hh index abc4a828c..c5dd893b5 100644 --- a/src/libstore/download.hh +++ b/src/libstore/download.hh @@ -94,6 +94,8 @@ class Store; struct Downloader { + virtual ~Downloader() { } + /* Enqueue a download request, returning a future to the result of the download. The future may throw a DownloadError exception. */ diff --git a/src/libstore/fs-accessor.hh b/src/libstore/fs-accessor.hh index f703e1d15..64780a6da 100644 --- a/src/libstore/fs-accessor.hh +++ b/src/libstore/fs-accessor.hh @@ -19,6 +19,8 @@ public: uint64_t narOffset = 0; // regular files only }; + virtual ~FSAccessor() { } + virtual Stat stat(const Path & path) = 0; virtual StringSet readDirectory(const Path & path) = 0; From 68e0f23edc7622290809c81f5071c9f99aded459 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 20 Sep 2019 14:29:49 +0200 Subject: [PATCH 271/634] Add flags to disallow dirty Git trees and to turn off warnings --- src/libexpr/eval.hh | 8 +++++++- src/libexpr/flake/flake.cc | 7 ++++--- src/libexpr/primops/fetchGit.cc | 6 +++++- src/libexpr/primops/fetchMercurial.cc | 6 +++++- 4 files changed, 21 insertions(+), 6 deletions(-) diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index 16350a5bf..1cc3c8507 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -377,10 +377,16 @@ struct EvalSettings : Config "Prefixes of URIs that builtin functions such as fetchurl and fetchGit are allowed to fetch."}; Setting traceFunctionCalls{this, false, "trace-function-calls", - "Emit log messages for each function entry and exit at the 'vomit' log level (-vvvv)"}; + "Emit log messages for each function entry and exit at the 'vomit' log level (-vvvv)."}; Setting flakeRegistry{this, "https://raw.githubusercontent.com/NixOS/flake-registry/master/flake-registry.json", "flake-registry", "Path or URI of the global flake registry."}; + + Setting allowDirty{this, true, "allow-dirty", + "Whether to allow dirty Git/Mercurial trees."}; + + Setting warnDirty{this, true, "warn-dirty", + "Whether to warn about dirty Git/Mercurial trees."}; }; extern EvalSettings evalSettings; diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index d9bbf85c2..f18159c05 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -480,9 +480,10 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLoc if (!(lockFile == oldLockFile)) { if (allowedToWrite(handleLockFile)) { if (auto refData = std::get_if(&topRef.data)) { - if (lockFile.isDirty()) - warn("will not write lock file of flake '%s' because it has a dirty input", topRef); - else { + if (lockFile.isDirty()) { + if (evalSettings.warnDirty) + warn("will not write lock file of flake '%s' because it has a dirty input", topRef); + } else { lockFile.write(refData->path + (topRef.subdir == "" ? "" : "/" + topRef.subdir) + "/flake.lock"); // Hack: Make sure that flake.lock is visible to Git, so it ends up in the Nix store. 
diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index 48d84c4a1..1b55b6f32 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -47,7 +47,11 @@ GitInfo exportGit(ref store, std::string uri, /* This is an unclean working tree. So copy all tracked files. */ - warn("Git tree '%s' is dirty", uri); + if (!evalSettings.allowDirty) + throw Error("Git tree '%s' is dirty", uri); + + if (evalSettings.warnDirty) + warn("Git tree '%s' is dirty", uri); GitInfo gitInfo; gitInfo.ref = "HEAD"; diff --git a/src/libexpr/primops/fetchMercurial.cc b/src/libexpr/primops/fetchMercurial.cc index c791443c3..40082894f 100644 --- a/src/libexpr/primops/fetchMercurial.cc +++ b/src/libexpr/primops/fetchMercurial.cc @@ -36,7 +36,11 @@ HgInfo exportMercurial(ref store, const std::string & uri, /* This is an unclean working tree. So copy all tracked files. */ - printTalkative("copying unclean Mercurial working tree '%s'", uri); + if (!evalSettings.allowDirty) + throw Error("Mercurial tree '%s' is unclean", uri); + + if (evalSettings.warnDirty) + warn("Mercurial tree '%s' is unclean", uri); HgInfo hgInfo; hgInfo.rev = "0000000000000000000000000000000000000000"; From 5961c9409739751caaea54540607703c53c9a37e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 20 Sep 2019 14:46:37 +0200 Subject: [PATCH 272/634] Flake alias -> id --- src/libexpr/flake/flakeref.cc | 12 ++++++------ src/libexpr/flake/flakeref.hh | 13 ++++++------- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/src/libexpr/flake/flakeref.cc b/src/libexpr/flake/flakeref.cc index 4930d03ce..364a98358 100644 --- a/src/libexpr/flake/flakeref.cc +++ b/src/libexpr/flake/flakeref.cc @@ -20,7 +20,7 @@ const static std::string revOrRefRegex = "(?:(" + revRegexS + ")|(" + refRegex + // "master/e72daba8250068216d79d2aeef40d4d95aff6666"). const static std::string refAndOrRevRegex = "(?:(" + revRegexS + ")|(?:(" + refRegex + ")(?:/(" + revRegexS + "))?))"; -const static std::string flakeAlias = "[a-zA-Z][a-zA-Z0-9_-]*"; +const static std::string flakeId = "[a-zA-Z][a-zA-Z0-9_-]*"; // GitHub references. const static std::string ownerRegex = "[a-zA-Z][a-zA-Z0-9_-]*"; @@ -43,7 +43,7 @@ FlakeRef::FlakeRef(const std::string & uri_, bool allowRelative) // FIXME: could combine this into one regex. 
static std::regex flakeRegex( - "(?:flake:)?(" + flakeAlias + ")(?:/(?:" + refAndOrRevRegex + "))?", + "(?:flake:)?(" + flakeId + ")(?:/(?:" + refAndOrRevRegex + "))?", std::regex::ECMAScript); static std::regex githubRegex( @@ -90,8 +90,8 @@ FlakeRef::FlakeRef(const std::string & uri_, bool allowRelative) std::cmatch match; if (std::regex_match(uri.c_str(), match, flakeRegex)) { - IsAlias d; - d.alias = match[1]; + IsId d; + d.id = match[1]; if (match[2].matched) rev = Hash(match[2], htSHA1); else if (match[3].matched) { @@ -195,8 +195,8 @@ std::string FlakeRef::to_string() const string += value; // FIXME: escaping }; - if (auto refData = std::get_if(&data)) { - string = refData->alias; + if (auto refData = std::get_if(&data)) { + string = refData->id; if (ref) string += '/' + *ref; if (rev) string += '/' + rev->gitRev(); } diff --git a/src/libexpr/flake/flakeref.hh b/src/libexpr/flake/flakeref.hh index 39e019dbd..addf5449f 100644 --- a/src/libexpr/flake/flakeref.hh +++ b/src/libexpr/flake/flakeref.hh @@ -101,16 +101,15 @@ namespace nix { */ typedef std::string FlakeId; -typedef std::string FlakeAlias; typedef std::string FlakeUri; struct FlakeRef { - struct IsAlias + struct IsId { - FlakeAlias alias; - bool operator<(const IsAlias & b) const { return alias < b.alias; }; - bool operator==(const IsAlias & b) const { return alias == b.alias; }; + FlakeId id; + bool operator<(const IsId & b) const { return id < b.id; }; + bool operator==(const IsId & b) const { return id == b.id; }; }; struct IsGitHub { @@ -140,7 +139,7 @@ struct FlakeRef // Git, Tarball - std::variant data; + std::variant data; std::optional ref; std::optional rev; @@ -168,7 +167,7 @@ struct FlakeRef a flake ID, which requires a lookup in the flake registry. */ bool isDirect() const { - return !std::get_if(&data); + return !std::get_if(&data); } /* Check whether this is an "immutable" flake reference, that is, From 5a0e98d1e5697d1d28131e3fd893282be660e008 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 20 Sep 2019 16:01:40 +0200 Subject: [PATCH 273/634] Use '#' instead of ':' to separate flakeref and attrpath This is less ambiguous. --- src/nix/installables.cc | 22 ++++++++----------- tests/flakes.sh | 48 ++++++++++++++++++++--------------------- 2 files changed, 33 insertions(+), 37 deletions(-) diff --git a/src/nix/installables.cc b/src/nix/installables.cc index a4726a59e..987b744fe 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -404,7 +404,7 @@ std::vector> SourceExprCommand::parseInstallables( for (auto & s : ss) { - size_t colon; + size_t hash; std::optional storePath; if (s.compare(0, 1, "(") == 0) @@ -417,8 +417,14 @@ std::vector> SourceExprCommand::parseInstallables( Strings{"legacyPackages." 
+ std::string(s, 8)})); } - else { + else if ((hash = s.rfind('#')) != std::string::npos) + result.push_back(std::make_shared( + *this, + FlakeRef(std::string(s, 0, hash), true), + std::string(s, hash + 1), + getDefaultFlakeAttrPathPrefixes())); + else { std::exception_ptr flakeEx; try { @@ -434,17 +440,7 @@ std::vector> SourceExprCommand::parseInstallables( } catch (BadFlakeRef &) { } - if ((colon = s.rfind(':')) != std::string::npos) { - auto flakeRef = std::string(s, 0, colon); - auto attrPath = std::string(s, colon + 1); - result.push_back(std::make_shared( - *this, - FlakeRef(flakeRef, true), - attrPath, - getDefaultFlakeAttrPathPrefixes())); - } - - else if (s.find('/') != std::string::npos && (storePath = follow(s))) + if (s.find('/') != std::string::npos && (storePath = follow(s))) result.push_back(std::make_shared(*storePath)); else { diff --git a/tests/flakes.sh b/tests/flakes.sh index f3baf8cef..a91a0c37c 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -126,7 +126,7 @@ json=$(nix flake info --flake-registry $registry flake1 --json | jq .) [[ $(echo "$json" | jq -r .lastModified) = $(git -C $flake1Dir log -n1 --format=%ct) ]] # Test 'nix build' on a flake. -nix build -o $TEST_ROOT/result --flake-registry $registry flake1:foo +nix build -o $TEST_ROOT/result --flake-registry $registry flake1#foo [[ -e $TEST_ROOT/result/hello ]] # Test defaultPackage. @@ -144,33 +144,33 @@ nix path-info $flake1Dir/result (! nix eval "(builtins.getFlake "$flake2Dir")") # But should succeed in impure mode. -nix build -o $TEST_ROOT/result --flake-registry $registry flake2:bar --impure +nix build -o $TEST_ROOT/result --flake-registry $registry flake2#bar --impure # Test automatic lock file generation. -nix build -o $TEST_ROOT/result --flake-registry $registry $flake2Dir:bar +nix build -o $TEST_ROOT/result --flake-registry $registry $flake2Dir#bar [[ -e $flake2Dir/flake.lock ]] git -C $flake2Dir commit flake.lock -m 'Add flake.lock' # Rerunning the build should not change the lockfile. -nix build -o $TEST_ROOT/result --flake-registry $registry $flake2Dir:bar +nix build -o $TEST_ROOT/result --flake-registry $registry $flake2Dir#bar [[ -z $(git -C $flake2Dir diff master) ]] # Building with a lockfile should not require a fetch of the registry. -nix build -o $TEST_ROOT/result --flake-registry file:///no-registry.json $flake2Dir:bar --tarball-ttl 0 +nix build -o $TEST_ROOT/result --flake-registry file:///no-registry.json $flake2Dir#bar --tarball-ttl 0 # Updating the flake should not change the lockfile. nix flake update --flake-registry $registry $flake2Dir [[ -z $(git -C $flake2Dir diff master) ]] # Now we should be able to build the flake in pure mode. -nix build -o $TEST_ROOT/result --flake-registry $registry flake2:bar +nix build -o $TEST_ROOT/result --flake-registry $registry flake2#bar # Or without a registry. # FIXME: shouldn't need '--flake-registry /no-registry'? -nix build -o $TEST_ROOT/result --flake-registry /no-registry file://$flake2Dir:bar --tarball-ttl 0 +nix build -o $TEST_ROOT/result --flake-registry /no-registry file://$flake2Dir#bar --tarball-ttl 0 # Test whether indirect dependencies work. -nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir:xyzzy +nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir#xyzzy # Add dependency to flake3. 
rm $flake3Dir/flake.nix @@ -192,7 +192,7 @@ git -C $flake3Dir add flake.nix git -C $flake3Dir commit -m 'Update flake.nix' # Check whether `nix build` works with an incomplete lockfile -nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir:"sth sth" +nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir#"sth sth" # Check whether it saved the lockfile [[ ! (-z $(git -C $flake3Dir diff master)) ]] @@ -203,7 +203,7 @@ git -C $flake3Dir commit -m 'Add lockfile' # Unsupported editions should be an error. sed -i $flake3Dir/flake.nix -e s/201909/201912/ -nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir:sth 2>&1 | grep 'unsupported edition' +nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir#sth 2>&1 | grep 'unsupported edition' # Test whether registry caching works. nix flake list --flake-registry file://$registry | grep -q flake3 @@ -214,12 +214,12 @@ mv $registry.tmp $registry # Test whether flakes are registered as GC roots for offline use. # FIXME: use tarballs rather than git. rm -rf $TEST_HOME/.cache -nix build -o $TEST_ROOT/result --flake-registry file://$registry file://$flake2Dir:bar +nix build -o $TEST_ROOT/result --flake-registry file://$registry file://$flake2Dir#bar mv $flake1Dir $flake1Dir.tmp mv $flake2Dir $flake2Dir.tmp nix-store --gc -nix build -o $TEST_ROOT/result --flake-registry file://$registry file://$flake2Dir:bar -nix build -o $TEST_ROOT/result --flake-registry file://$registry file://$flake2Dir:bar --tarball-ttl 0 +nix build -o $TEST_ROOT/result --flake-registry file://$registry file://$flake2Dir#bar +nix build -o $TEST_ROOT/result --flake-registry file://$registry file://$flake2Dir#bar --tarball-ttl 0 mv $flake1Dir.tmp $flake1Dir mv $flake2Dir.tmp $flake2Dir @@ -264,30 +264,30 @@ git -C $flake3Dir commit -m 'Add nonFlakeInputs' # Check whether `nix build` works with a lockfile which is missing a # nonFlakeInputs. -nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir:sth +nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir#sth git -C $flake3Dir add flake.lock git -C $flake3Dir commit -m 'Update nonFlakeInputs' -nix build -o $TEST_ROOT/result --flake-registry $registry flake3:fnord +nix build -o $TEST_ROOT/result --flake-registry $registry flake3#fnord [[ $(cat $TEST_ROOT/result) = FNORD ]] -# Check whether flake input fetching is lazy: flake3:sth does not +# Check whether flake input fetching is lazy: flake3#sth does not # depend on flake2, so this shouldn't fail. rm -rf $TEST_HOME/.cache clearStore mv $flake2Dir $flake2Dir.tmp mv $nonFlakeDir $nonFlakeDir.tmp -nix build -o $TEST_ROOT/result --flake-registry $registry flake3:sth -(! nix build -o $TEST_ROOT/result --flake-registry $registry flake3:xyzzy) -(! nix build -o $TEST_ROOT/result --flake-registry $registry flake3:fnord) +nix build -o $TEST_ROOT/result --flake-registry $registry flake3#sth +(! nix build -o $TEST_ROOT/result --flake-registry $registry flake3#xyzzy) +(! 
nix build -o $TEST_ROOT/result --flake-registry $registry flake3#fnord) mv $flake2Dir.tmp $flake2Dir mv $nonFlakeDir.tmp $nonFlakeDir -nix build -o $TEST_ROOT/result --flake-registry $registry flake3:xyzzy flake3:fnord +nix build -o $TEST_ROOT/result --flake-registry $registry flake3#xyzzy flake3#fnord # Test doing multiple `lookupFlake`s -nix build -o $TEST_ROOT/result --flake-registry $registry flake4:xyzzy +nix build -o $TEST_ROOT/result --flake-registry $registry flake4#xyzzy # Make branch "removeXyzzy" where flake3 doesn't have xyzzy anymore git -C $flake3Dir checkout -b removeXyzzy @@ -325,11 +325,11 @@ git -C $flake3Dir commit -m 'Remove packages.xyzzy' git -C $flake3Dir checkout master # Test whether fuzzy-matching works for IsAlias -(! nix build -o $TEST_ROOT/result --flake-registry $registry flake4/removeXyzzy:xyzzy) +(! nix build -o $TEST_ROOT/result --flake-registry $registry flake4/removeXyzzy#xyzzy) # Test whether fuzzy-matching works for IsGit -(! nix build -o $TEST_ROOT/result --flake-registry $registry flake4/removeXyzzy:xyzzy) -nix build -o $TEST_ROOT/result --flake-registry $registry flake4/removeXyzzy:sth +(! nix build -o $TEST_ROOT/result --flake-registry $registry flake4/removeXyzzy#xyzzy) +nix build -o $TEST_ROOT/result --flake-registry $registry flake4/removeXyzzy#sth # Testing the nix CLI nix flake add --flake-registry $registry flake1 flake3 From 14d3f450098e0f4a9b8d538545f1d1bd1decdab3 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 20 Sep 2019 16:06:49 +0200 Subject: [PATCH 274/634] Simplify --- src/nix/installables.cc | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 987b744fe..85005cc95 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -425,29 +425,15 @@ std::vector> SourceExprCommand::parseInstallables( getDefaultFlakeAttrPathPrefixes())); else { - std::exception_ptr flakeEx; - try { auto flakeRef = FlakeRef(s, true); result.push_back(std::make_shared( *this, std::move(flakeRef), getDefaultFlakeAttrPaths())); - continue; - } catch (MissingFlake &) { - /* 's' could be parsed as a flakeref, but it - references a local path that is not a flake. So - take note of that. */ - flakeEx = std::current_exception(); - } catch (BadFlakeRef &) { - } - - if (s.find('/') != std::string::npos && (storePath = follow(s))) - result.push_back(std::make_shared(*storePath)); - - else { - if (flakeEx) - std::rethrow_exception(flakeEx); + } catch (...) 
{ + if (s.find('/') != std::string::npos && (storePath = follow(s))) + result.push_back(std::make_shared(*storePath)); else - throw Error("unsupported argument '%s'", s); + throw; } } } From 893be6f5e36abb58bbaa9c49055a5218114dd514 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Sun, 22 Sep 2019 21:29:33 +0200 Subject: [PATCH 275/634] Don't catch exceptions by value --- src/libexpr/json-to-value.cc | 4 ++-- src/libexpr/primops/fetchGit.cc | 2 +- src/libstore/local-store.cc | 4 ++-- src/nix-build/nix-build.cc | 2 +- src/nix/edit.cc | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/libexpr/json-to-value.cc b/src/libexpr/json-to-value.cc index 3f6017957..8bae986f9 100644 --- a/src/libexpr/json-to-value.cc +++ b/src/libexpr/json-to-value.cc @@ -111,9 +111,9 @@ static void parseJSON(EvalState & state, const char * & s, Value & v) mkFloat(v, stod(tmp_number)); else mkInt(v, stol(tmp_number)); - } catch (std::invalid_argument e) { + } catch (std::invalid_argument & e) { throw JSONParseError("invalid JSON number"); - } catch (std::out_of_range e) { + } catch (std::out_of_range & e) { throw JSONParseError("out-of-range JSON number"); } } diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index 1b55b6f32..64f138195 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -37,7 +37,7 @@ GitInfo exportGit(ref store, std::string uri, try { runProgram("git", true, { "-C", uri, "diff-index", "--quiet", "HEAD", "--" }); - } catch (ExecError e) { + } catch (ExecError & e) { if (!WIFEXITED(e.status) || WEXITSTATUS(e.status) != 1) throw; clean = false; } diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 1062df781..307c00dbb 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -872,8 +872,8 @@ void LocalStore::querySubstitutablePathInfos(const PathSet & paths, info->references, narInfo ? narInfo->fileSize : 0, info->narSize}; - } catch (InvalidPath) { - } catch (SubstituterDisabled) { + } catch (InvalidPath &) { + } catch (SubstituterDisabled &) { } catch (Error & e) { if (settings.tryFallback) printError(e.what()); diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc index d4749f8fd..605802f72 100755 --- a/src/nix-build/nix-build.cc +++ b/src/nix-build/nix-build.cc @@ -280,7 +280,7 @@ static void _main(int argc, char * * argv) auto absolute = i; try { absolute = canonPath(absPath(i), true); - } catch (Error e) {}; + } catch (Error & e) {}; if (store->isStorePath(absolute) && std::regex_match(absolute, std::regex(".*\\.drv(!.*)?"))) drvs.push_back(DrvInfo(*state, store, absolute)); else diff --git a/src/nix/edit.cc b/src/nix/edit.cc index c62b35c46..632a99d11 100644 --- a/src/nix/edit.cc +++ b/src/nix/edit.cc @@ -50,7 +50,7 @@ struct CmdEdit : InstallableCommand int lineno; try { lineno = std::stoi(std::string(pos, colon + 1)); - } catch (std::invalid_argument e) { + } catch (std::invalid_argument & e) { throw Error("cannot parse line number '%s'", pos); } From 382aa05ff71b61379f5c2792eaf517bdf4a5c5bf Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Sun, 22 Sep 2019 21:53:01 +0200 Subject: [PATCH 276/634] nix flake info --json: Get rid of duplicate getFlake() call Also fix some gcc warnings. 
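As a rough usage sketch of the code path this touches (the registry and the
'flake1' flake are the ones set up in tests/flakes.sh, nothing new):

    $ nix flake info --flake-registry $registry flake1
    $ nix flake info --flake-registry $registry flake1 --json | jq .lastModified

The --json branch is the one that previously called getFlake() in addition to
resolveFlake(); it now resolves the flake once and reuses the result for both
the flake metadata and the outputs listing.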
--- src/nix/flake.cc | 19 ++++++++++--------- src/nix/installables.cc | 7 ++++--- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 2e352306e..d0135143c 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -174,11 +174,12 @@ static void enumerateOutputs(EvalState & state, Value & vFlake, { state.forceAttrs(vFlake); - auto vOutputs = (*vFlake.attrs->get(state.symbols.create("outputs")))->value; + auto aOutputs = vFlake.attrs->get(state.symbols.create("outputs")); + assert(aOutputs); - state.forceAttrs(*vOutputs); + state.forceAttrs(*(*aOutputs)->value); - for (auto & attr : *vOutputs->attrs) + for (auto & attr : *((*aOutputs)->value->attrs)) callback(attr.name, *attr.value, *attr.pos); } @@ -191,15 +192,12 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON void run(nix::ref store) override { - auto flake = getFlake(); - stopProgressBar(); - if (json) { - auto json = flakeToJson(flake); - auto state = getEvalState(); auto flake = resolveFlake(); + auto json = flakeToJson(flake.flake); + auto vFlake = state->allocValue(); flake::callFlake(*state, flake, *vFlake); @@ -222,8 +220,11 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON json["outputs"] = std::move(outputs); std::cout << json.dump() << std::endl; - } else + } else { + auto flake = getFlake(); + stopProgressBar(); printFlakeInfo(flake); + } } }; diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 85005cc95..867133653 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -291,11 +291,12 @@ struct InstallableFlake : InstallableValue makeFlakeClosureGCRoot(*state.store, flakeRef, resFlake); - auto vOutputs = (*vFlake->attrs->get(state.symbols.create("outputs")))->value; + auto aOutputs = vFlake->attrs->get(state.symbols.create("outputs")); + assert(aOutputs); - state.forceValue(*vOutputs); + state.forceValue(*(*aOutputs)->value); - return vOutputs; + return (*aOutputs)->value; } std::vector toDerivations() override From c32bba748902a8603036744d4ebccc134f02958c Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 24 Sep 2019 17:28:18 +0200 Subject: [PATCH 277/634] Shut up some warnings --- src/libstore/nar-info-disk-cache.hh | 2 ++ src/libstore/sqlite.hh | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/libstore/nar-info-disk-cache.hh b/src/libstore/nar-info-disk-cache.hh index 88d909732..0646d5a94 100644 --- a/src/libstore/nar-info-disk-cache.hh +++ b/src/libstore/nar-info-disk-cache.hh @@ -10,6 +10,8 @@ class NarInfoDiskCache public: typedef enum { oValid, oInvalid, oUnknown } Outcome; + virtual ~NarInfoDiskCache() { } + virtual void createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) = 0; diff --git a/src/libstore/sqlite.hh b/src/libstore/sqlite.hh index 78e53fa32..0f46f6a07 100644 --- a/src/libstore/sqlite.hh +++ b/src/libstore/sqlite.hh @@ -5,8 +5,8 @@ #include "types.hh" -class sqlite3; -class sqlite3_stmt; +struct sqlite3; +struct sqlite3_stmt; namespace nix { From 454e3a541a05a9b422ae5cd0911316f589f02506 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 26 Sep 2019 17:51:51 +0200 Subject: [PATCH 278/634] Fix sorting of non-flake input attributes --- src/libexpr/flake/flake.cc | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index f18159c05..2061a34a4 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -146,7 +146,7 @@ static FlakeRef lookupInRefMap( return flakeRef; } 
-static SourceInfo fetchFlake(EvalState & state, const FlakeRef & resolvedRef) +static SourceInfo fetchInput(EvalState & state, const FlakeRef & resolvedRef) { assert(resolvedRef.isDirect()); @@ -231,7 +231,7 @@ static Flake getFlake(EvalState & state, const FlakeRef & originalRef, maybeLookupFlake(state, lookupInRefMap(refMap, originalRef), allowLookup)); - SourceInfo sourceInfo = fetchFlake(state, flakeRef); + SourceInfo sourceInfo = fetchInput(state, flakeRef); debug("got flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string()); FlakeRef resolvedRef = sourceInfo.resolvedRef; @@ -352,7 +352,7 @@ static SourceInfo getNonFlake(EvalState & state, const FlakeRef & originalRef, maybeLookupFlake(state, lookupInRefMap(refMap, originalRef), allowLookup)); - auto sourceInfo = fetchFlake(state, flakeRef); + auto sourceInfo = fetchInput(state, flakeRef); debug("got non-flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string()); FlakeRef resolvedRef = sourceInfo.resolvedRef; @@ -566,6 +566,8 @@ static void prim_callFlake(EvalState & state, const Pos & pos, Value * * args, V sourceInfo.storePath, {sourceInfo.storePath}); emitSourceInfoAttrs(state, sourceInfo, v); + + v.attrs->sort(); } } From 15b888c9a589e71a6e3b9bc2cfcb3679f90fbf70 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 27 Sep 2019 15:31:09 +0200 Subject: [PATCH 279/634] cmatch -> smatch --- src/libexpr/flake/flakeref.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/libexpr/flake/flakeref.cc b/src/libexpr/flake/flakeref.cc index 364a98358..8e90e5989 100644 --- a/src/libexpr/flake/flakeref.cc +++ b/src/libexpr/flake/flakeref.cc @@ -88,8 +88,8 @@ FlakeRef::FlakeRef(const std::string & uri_, bool allowRelative) return true; }; - std::cmatch match; - if (std::regex_match(uri.c_str(), match, flakeRegex)) { + std::smatch match; + if (std::regex_match(uri, match, flakeRegex)) { IsId d; d.id = match[1]; if (match[2].matched) @@ -102,7 +102,7 @@ FlakeRef::FlakeRef(const std::string & uri_, bool allowRelative) data = d; } - else if (std::regex_match(uri.c_str(), match, githubRegex)) { + else if (std::regex_match(uri, match, githubRegex)) { IsGitHub d; d.owner = match[1]; d.repo = match[2]; @@ -120,7 +120,7 @@ FlakeRef::FlakeRef(const std::string & uri_, bool allowRelative) data = d; } - else if (std::regex_match(uri.c_str(), match, uriRegex)) { + else if (std::regex_match(uri, match, uriRegex)) { auto & scheme = match[2]; if (scheme == "git" || scheme == "git+http" || From 9b9de3a5e321c764ec018b32c1f949a49b0c69ef Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 27 Sep 2019 17:01:25 +0200 Subject: [PATCH 280/634] nix dev-shell: Improve environment handling Only variables that were marked as exported are exported in the dev shell. 
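A minimal standalone bash illustration of the distinction relied on here
(nothing in it is part of the patch): variables marked for export show up as
'declare -x' lines, while plain shell variables only appear in the output of
'set':

    exported_var=hello; export exported_var
    plain_var=world

    export -p | grep '^declare -x exported_var'   # listed, so it gets re-exported
    export -p | grep '^declare -x plain_var'      # not listed
    set        | grep '^plain_var='               # still captured, but unexported

The builder dumps both ('export > $out; set >> $out'), and the parser uses the
'declare -x' lines only to decide which variables to mark as exported.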
Also, we no longer try to parse the function section of the env file, fixing $ nix dev-shell error: shell environment '/nix/store/h7ama3kahb8lypf4nvjx34z06g9ncw4h-nixops-1.7pre20190926.4c7acbb-env' has unexpected line '/^[a-z]?"""/ {' --- src/nix/shell.cc | 110 +++++++++++++++++++++++++++++------------------ 1 file changed, 69 insertions(+), 41 deletions(-) diff --git a/src/nix/shell.cc b/src/nix/shell.cc index a3827c297..db7106793 100644 --- a/src/nix/shell.cc +++ b/src/nix/shell.cc @@ -7,55 +7,76 @@ #include "affinity.hh" #include "progress-bar.hh" +#include + using namespace nix; +struct Var +{ + bool exported; + std::string value; // quoted string or array +}; + struct BuildEnvironment { - // FIXME: figure out which vars should be exported. - std::map env; - std::map functions; + std::map env; + std::string bashFunctions; }; BuildEnvironment readEnvironment(const Path & path) { BuildEnvironment res; - auto lines = tokenizeString(readFile(path), "\n"); + std::set exported; - auto getLine = - [&]() { - if (lines.empty()) - throw Error("shell environment '%s' ends unexpectedly", path); - auto line = lines.front(); - lines.pop_front(); - return line; - }; + auto file = readFile(path); + //auto file = readFile("/tmp/x"); - while (!lines.empty()) { - auto line = getLine(); + auto pos = file.cbegin(); - auto eq = line.find('='); - if (eq != std::string::npos) { - std::string name(line, 0, eq); - std::string value(line, eq + 1); - // FIXME: parse arrays - res.env.insert({name, value}); + static std::string varNameRegex = + R"re((?:[a-zA-Z_][a-zA-Z0-9_]*))re"; + + static std::regex declareRegex( + "^declare -x (" + varNameRegex + ")" + + R"re((?:="((?:[^"\\]|\\.)*)")?\n)re"); + + static std::string simpleStringRegex = + R"re((?:[a-zA-Z0-9_/:\.\-1\+]*))re"; + + static std::string quotedStringRegex = + R"re((?:\$?'[^']*'))re"; + + static std::string arrayRegex = + R"re((?:\(( *\[[^\]]+\]="(?:[^"\\]|\\.)*")*\)))re"; + + static std::regex varRegex( + "^(" + varNameRegex + ")=(" + simpleStringRegex + "|" + quotedStringRegex + "|" + arrayRegex + ")\n"); + + static std::regex functionRegex( + "^" + varNameRegex + " \\(\\) *\n"); + + while (pos != file.end()) { + + std::smatch match; + + if (std::regex_search(pos, file.cend(), match, declareRegex)) { + pos = match[0].second; + exported.insert(match[1]); } - else if (hasSuffix(line, " () ")) { - std::string name(line, 0, line.size() - 4); - // FIXME: validate name - auto l = getLine(); - if (l != "{ ") throw Error("shell environment '%s' has unexpected line '%s'", path, l); - std::string body; - while ((l = getLine()) != "}") { - body += l; - body += '\n'; - } - res.functions.insert({name, body}); + else if (std::regex_search(pos, file.cend(), match, varRegex)) { + pos = match[0].second; + res.env.insert({match[1], Var { (bool) exported.count(match[1]), match[2] }}); } - else throw Error("shell environment '%s' has unexpected line '%s'", path, line); + else if (std::regex_search(pos, file.cend(), match, functionRegex)) { + res.bashFunctions = std::string(pos, file.cend()); + break; + } + + else throw Error("shell environment '%s' has unexpected line '%s'", + path, file.substr(pos - file.cbegin(), 60)); } return res; @@ -72,7 +93,16 @@ Path getDerivationEnvironment(ref store, Derivation drv) if (builder != "bash") throw Error("'nix shell' only works on derivations that use 'bash' as their builder"); - drv.args = {"-c", "set -e; export IN_NIX_SHELL=impure; export dontAddDisableDepTrack=1; if [[ -n $stdenv ]]; then source $stdenv/setup; fi; set > $out"}; + 
drv.args = { + "-c", + "set -e; " + "export IN_NIX_SHELL=impure; " + "export dontAddDisableDepTrack=1; " + "if [[ -n $stdenv ]]; then " + " source $stdenv/setup; " + "fi; " + "export > $out; " + "set >> $out "}; /* Remove derivation checks. */ drv.env.erase("allowedReferences"); @@ -146,18 +176,16 @@ struct Common : InstallableCommand, MixProfile out << "nix_saved_PATH=\"$PATH\"\n"; for (auto & i : buildEnvironment.env) { - // FIXME: shellEscape - // FIXME: figure out what to export - // FIXME: handle arrays - if (!ignoreVars.count(i.first) && !hasPrefix(i.first, "BASH_")) - out << fmt("export %s=%s\n", i.first, i.second); + if (!ignoreVars.count(i.first) && !hasPrefix(i.first, "BASH_")) { + out << fmt("%s=%s\n", i.first, i.second.value); + if (i.second.exported) + out << fmt("export %s\n", i.first); + } } out << "PATH=\"$PATH:$nix_saved_PATH\"\n"; - for (auto & i : buildEnvironment.functions) { - out << fmt("%s () {\n%s\n}\n", i.first, i.second); - } + out << buildEnvironment.bashFunctions << "\n"; // FIXME: set outputs From 780c1a8f271113cea7d70d57f44afce5da5928d6 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 2 Oct 2019 10:52:56 +0200 Subject: [PATCH 281/634] nix dev-shell: Ignore $NIX_LOG_FD --- src/nix/shell.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/nix/shell.cc b/src/nix/shell.cc index db7106793..5e1d53aff 100644 --- a/src/nix/shell.cc +++ b/src/nix/shell.cc @@ -157,6 +157,7 @@ struct Common : InstallableCommand, MixProfile "HOME", // FIXME: don't ignore in pure mode? "NIX_BUILD_TOP", "NIX_ENFORCE_PURITY", + "NIX_LOG_FD", "PPID", "PWD", "SHELLOPTS", From 90d6018509e60fd07d93ddefbf6f983a72d4b587 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 2 Oct 2019 22:08:19 +0200 Subject: [PATCH 282/634] Fix aborts when using builtins.getFlake In that case, 'self' could refer to a value on the stack, so accessing 'self.rev' would abort. --- src/libexpr/flake/flake.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index 2061a34a4..9e260263c 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -574,8 +574,9 @@ static void prim_callFlake(EvalState & state, const Pos & pos, Value * * args, V void callFlake(EvalState & state, const Flake & flake, const LockedInputs & lockedInputs, - Value & vRes) + Value & vResFinal) { + auto & vRes = *state.allocValue(); auto & vInputs = *state.allocValue(); state.mkAttrs(vInputs, flake.inputs.size() + 1); @@ -617,6 +618,8 @@ void callFlake(EvalState & state, state.callFunction(vCall, *flake.vOutputs, vCall2, noPos); state.callFunction(vCall2, vInputs, vCall3, noPos); state.callFunction(vCall3, vSourceInfo, vRes, noPos); + + vResFinal = vRes; } void callFlake(EvalState & state, From 204291f0598c43b10cf20a89b49fc63624d78c7c Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 4 Oct 2019 10:45:33 +0200 Subject: [PATCH 283/634] Merge release.nix, shell.nix and release-common.nix into flake.nix Also provide a Nixpkgs overlay, memoize Nixpkgs evaluation and fit the githubFlakes test. 
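For readers who haven't used overlays, a minimal self-contained sketch of the
mechanism the new flake.nix builds on (the <nixpkgs> search-path lookup and the
added attribute are illustrative, not taken from this patch): an overlay is a
two-argument function layered onto the package set at import time.

    let
      # Illustrative overlay: add one attribute on top of the existing set.
      myOverlay = final: prev: {
        greeting = prev.writeText "greeting" "hello from the overlay";
      };

      pkgs = import <nixpkgs> {            # assumes a nixpkgs in NIX_PATH
        system = builtins.currentSystem;
        overlays = [ myOverlay ];
      };
    in
      pkgs.greeting                        # exists only because of the overlay

The overlay added below ('self.overlay') provides 'nix' and 'nix-perl' in the
same way, so downstream users can layer them onto their own Nixpkgs, and
nixpkgsFor applies it while importing Nixpkgs once per system instead of
re-importing it in every job.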
--- flake.lock | 6 +- flake.nix | 468 +++++++++++++++++++++++++++++++++++-- release-common.nix | 80 ------- release.nix | 396 ------------------------------- shell.nix | 28 --- tests/github-flakes.nix | 12 +- tests/nix-copy-closure.nix | 9 +- tests/remote-builds.nix | 9 +- tests/setuid.nix | 8 +- 9 files changed, 476 insertions(+), 540 deletions(-) delete mode 100644 release-common.nix delete mode 100644 release.nix delete mode 100644 shell.nix diff --git a/flake.lock b/flake.lock index 70a433b26..05e3a6e25 100644 --- a/flake.lock +++ b/flake.lock @@ -2,9 +2,9 @@ "inputs": { "nixpkgs": { "inputs": {}, - "narHash": "sha256-HGlE2VNbdEjCP76hWAS72kHBlMWhpvqWo58Obg1Vy6s=", - "originalUri": "nixpkgs", - "uri": "github:edolstra/nixpkgs/13e1bce51f4aebdf3db58ce8c4a93e904a272bff" + "narHash": "sha256-ltGlDPfwicH/u4orj1n4JXgRsA+jvKQsGnekObi0TV4=", + "originalUri": "nixpkgs/release-19.03", + "uri": "github:edolstra/nixpkgs/9a593b575e4044f9aff939b512e7cb1cf1e76a65" } }, "version": 3 diff --git a/flake.nix b/flake.nix index f0d928494..220b6d8c5 100644 --- a/flake.nix +++ b/flake.nix @@ -3,27 +3,461 @@ edition = 201909; - outputs = { self, nixpkgs }: rec { + inputs.nixpkgs.uri = "nixpkgs/release-19.03"; - hydraJobs = import ./release.nix { - nix = self; - nixpkgs = nixpkgs; - }; + outputs = { self, nixpkgs }: - checks = { - binaryTarball = hydraJobs.binaryTarball.x86_64-linux; - perlBindings = hydraJobs.perlBindings.x86_64-linux; - }; + let - packages = { - nix = hydraJobs.build.x86_64-linux; - nix-perl-bindings = hydraJobs.perlBindings.x86_64-linux; - }; + officialRelease = false; - defaultPackage = packages.nix; + systems = [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ]; + + # Memoize nixpkgs for different platforms for efficiency. + nixpkgsFor = nixpkgs.lib.genAttrs systems (system: + import nixpkgs { + inherit system; + overlays = [ self.overlay ]; + } + ); + + commonDeps = pkgs: with pkgs; rec { + # Use "busybox-sandbox-shell" if present, + # if not (legacy) fallback and hope it's sufficient. + sh = pkgs.busybox-sandbox-shell or (busybox.override { + useMusl = true; + enableStatic = true; + enableMinimal = true; + extraConfig = '' + CONFIG_FEATURE_FANCY_ECHO y + CONFIG_FEATURE_SH_MATH y + CONFIG_FEATURE_SH_MATH_64 y + + CONFIG_ASH y + CONFIG_ASH_OPTIMIZE_FOR_SIZE y + + CONFIG_ASH_ALIAS y + CONFIG_ASH_BASH_COMPAT y + CONFIG_ASH_CMDCMD y + CONFIG_ASH_ECHO y + CONFIG_ASH_GETOPTS y + CONFIG_ASH_INTERNAL_GLOB y + CONFIG_ASH_JOB_CONTROL y + CONFIG_ASH_PRINTF y + CONFIG_ASH_TEST y + ''; + }); + + configureFlags = + [ + "--enable-gc" + ] ++ lib.optionals stdenv.isLinux [ + "--with-sandbox-shell=${sh}/bin/busybox" + ]; + + tarballDeps = + [ bison + flex + libxml2 + libxslt + docbook5 + docbook_xsl_ns + autoconf-archive + autoreconfHook + ]; + + buildDeps = + [ curl + bzip2 xz brotli editline + openssl pkgconfig sqlite boehmgc + boost + + # Tests + git + mercurial + jq + ] + ++ lib.optionals stdenv.isLinux [libseccomp utillinuxMinimal] + ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium + ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) + (aws-sdk-cpp.override { + apis = ["s3" "transfer"]; + customMemoryManagement = false; + }); + + perlDeps = + [ perl + perlPackages.DBDSQLite + ]; + }; + + in { + + # A Nixpkgs overlay that overrides the 'nix' and 'nix-perl' + # packages. 
+ overlay = final: prev: { + + nix = with final; with commonDeps pkgs; releaseTools.nixBuild { + name = "nix"; + src = self.hydraJobs.tarball; + + buildInputs = buildDeps; + + preConfigure = + # Copy libboost_context so we don't get all of Boost in our closure. + # https://github.com/NixOS/nixpkgs/issues/45462 + '' + mkdir -p $out/lib + cp -pd ${boost}/lib/{libboost_context*,libboost_thread*,libboost_system*} $out/lib + rm -f $out/lib/*.a + ${lib.optionalString stdenv.isLinux '' + chmod u+w $out/lib/*.so.* + patchelf --set-rpath $out/lib:${stdenv.cc.cc.lib}/lib $out/lib/libboost_thread.so.* + ''} + ''; + + configureFlags = configureFlags ++ + [ "--sysconfdir=/etc" ]; + + enableParallelBuilding = true; + + makeFlags = "profiledir=$(out)/etc/profile.d"; + + installFlags = "sysconfdir=$(out)/etc"; + + doInstallCheck = true; + installCheckFlags = "sysconfdir=$(out)/etc"; + }; + + nix-perl = with final; releaseTools.nixBuild { + name = "nix-perl"; + src = self.hydraJobs.tarball; + + buildInputs = + [ nix curl bzip2 xz pkgconfig pkgs.perl boost ] + ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium; + + configureFlags = '' + --with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix} + --with-dbd-sqlite=${perlPackages.DBDSQLite}/${pkgs.perl.libPrefix} + ''; + + enableParallelBuilding = true; + + postUnpack = "sourceRoot=$sourceRoot/perl"; + }; + + }; + + hydraJobs = { + + # Source tarball. + tarball = + with nixpkgsFor.x86_64-linux; + with commonDeps pkgs; + + releaseTools.sourceTarball { + name = "nix-tarball"; + version = builtins.readFile ./.version; + versionSuffix = if officialRelease then "" else + "pre${builtins.substring 0 8 self.lastModified}_${self.shortRev}"; + src = self; + inherit officialRelease; + + buildInputs = tarballDeps ++ buildDeps; + + configureFlags = "--enable-gc"; + + postUnpack = '' + (cd $sourceRoot && find . -type f) | cut -c3- > $sourceRoot/.dist-files + cat $sourceRoot/.dist-files + ''; + + preConfigure = '' + (cd perl ; autoreconf --install --force --verbose) + # TeX needs a writable font cache. + export VARTEXFONTS=$TMPDIR/texfonts + ''; + + distPhase = + '' + runHook preDist + make dist + mkdir -p $out/tarballs + cp *.tar.* $out/tarballs + ''; + + preDist = '' + make install docdir=$out/share/doc/nix makefiles=doc/manual/local.mk + echo "doc manual $out/share/doc/nix/manual" >> $out/nix-support/hydra-build-products + ''; + }; + + # Binary package for various platforms. + build = nixpkgs.lib.genAttrs systems (system: nixpkgsFor.${system}.nix); + + # Perl bindings for various platforms. + perlBindings = nixpkgs.lib.genAttrs systems (system: nixpkgsFor.${system}.nix-perl); + + # Binary tarball for various platforms, containing a Nix store + # with the closure of 'nix' package, and the second half of + # the installation script. 
+ binaryTarball = nixpkgs.lib.genAttrs systems (system: + + with nixpkgsFor.${system}; + + let + version = nix.src.version; + installerClosureInfo = closureInfo { rootPaths = [ nix cacert ]; }; + in + + runCommand "nix-binary-tarball-${version}" + { nativeBuildInputs = lib.optional (system != "aarch64-linux") shellcheck; + meta.description = "Distribution-independent Nix bootstrap binaries for ${system}"; + } + '' + cp ${installerClosureInfo}/registration $TMPDIR/reginfo + substitute ${./scripts/install-nix-from-closure.sh} $TMPDIR/install \ + --subst-var-by nix ${nix} \ + --subst-var-by cacert ${cacert} + + substitute ${./scripts/install-darwin-multi-user.sh} $TMPDIR/install-darwin-multi-user.sh \ + --subst-var-by nix ${nix} \ + --subst-var-by cacert ${cacert} + substitute ${./scripts/install-systemd-multi-user.sh} $TMPDIR/install-systemd-multi-user.sh \ + --subst-var-by nix ${nix} \ + --subst-var-by cacert ${cacert} + substitute ${./scripts/install-multi-user.sh} $TMPDIR/install-multi-user \ + --subst-var-by nix ${nix} \ + --subst-var-by cacert ${cacert} + + if type -p shellcheck; then + # SC1090: Don't worry about not being able to find + # $nix/etc/profile.d/nix.sh + shellcheck --exclude SC1090 $TMPDIR/install + shellcheck $TMPDIR/install-darwin-multi-user.sh + shellcheck $TMPDIR/install-systemd-multi-user.sh + + # SC1091: Don't panic about not being able to source + # /etc/profile + # SC2002: Ignore "useless cat" "error", when loading + # .reginfo, as the cat is a much cleaner + # implementation, even though it is "useless" + # SC2116: Allow ROOT_HOME=$(echo ~root) for resolving + # root's home directory + shellcheck --external-sources \ + --exclude SC1091,SC2002,SC2116 $TMPDIR/install-multi-user + fi + + chmod +x $TMPDIR/install + chmod +x $TMPDIR/install-darwin-multi-user.sh + chmod +x $TMPDIR/install-systemd-multi-user.sh + chmod +x $TMPDIR/install-multi-user + dir=nix-${version}-${system} + fn=$out/$dir.tar.xz + mkdir -p $out/nix-support + echo "file binary-dist $fn" >> $out/nix-support/hydra-build-products + tar cvfJ $fn \ + --owner=0 --group=0 --mode=u+rw,uga+r \ + --absolute-names \ + --hard-dereference \ + --transform "s,$TMPDIR/install,$dir/install," \ + --transform "s,$TMPDIR/reginfo,$dir/.reginfo," \ + --transform "s,$NIX_STORE,$dir/store,S" \ + $TMPDIR/install $TMPDIR/install-darwin-multi-user.sh \ + $TMPDIR/install-systemd-multi-user.sh \ + $TMPDIR/install-multi-user $TMPDIR/reginfo \ + $(cat ${installerClosureInfo}/store-paths) + ''); + + # The first half of the installation script. This is uploaded + # to https://nixos.org/nix/install. It downloads the binary + # tarball for the user's system and calls the second half of the + # installation script. + installerScript = + with nixpkgsFor.x86_64-linux; + runCommand "installer-script" + { buildInputs = [ nix ]; + } + '' + mkdir -p $out/nix-support + + substitute ${./scripts/install.in} $out/install \ + ${pkgs.lib.concatMapStrings + (system: "--replace '@binaryTarball_${system}@' $(nix hash-file --base16 --type sha256 ${self.hydraJobs.binaryTarball.${system}}/*.tar.xz) ") + [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ] + } \ + --replace '@nixVersion@' ${nix.src.version} + + echo "file installer $out/install" >> $out/nix-support/hydra-build-products + ''; + + # Line coverage analysis. 
+ coverage = + with nixpkgsFor.x86_64-linux; + with commonDeps pkgs; + + releaseTools.coverageAnalysis { + name = "nix-build"; + src = self.hydraJobs.tarball; + + buildInputs = buildDeps; + + dontInstall = false; + + doInstallCheck = true; + + lcovFilter = [ "*/boost/*" "*-tab.*" "*/nlohmann/*" "*/linenoise/*" ]; + + # We call `dot', and even though we just use it to + # syntax-check generated dot files, it still requires some + # fonts. So provide those. + FONTCONFIG_FILE = texFunctions.fontsConf; + }; + + # System tests. + tests.remoteBuilds = import ./tests/remote-builds.nix { + system = "x86_64-linux"; + inherit nixpkgs; + inherit (self) overlay; + }; + + tests.nix-copy-closure = import ./tests/nix-copy-closure.nix { + system = "x86_64-linux"; + inherit nixpkgs; + inherit (self) overlay; + }; + + tests.githubFlakes = (import ./tests/github-flakes.nix rec { + system = "x86_64-linux"; + inherit nixpkgs; + inherit (self) overlay; + }); + + tests.setuid = nixpkgs.lib.genAttrs + ["i686-linux" "x86_64-linux"] + (system: + import ./tests/setuid.nix rec { + inherit nixpkgs system; + inherit (self) overlay; + }); + + # Test whether the binary tarball works in an Ubuntu system. + tests.binaryTarball = + with nixpkgsFor.x86_64-linux; + vmTools.runInLinuxImage (runCommand "nix-binary-tarball-test" + { diskImage = vmTools.diskImages.ubuntu1204x86_64; + } + '' + set -x + useradd -m alice + su - alice -c 'tar xf ${self.hydraJobs.binaryTarball.x86_64-linux}/*.tar.*' + mkdir /dest-nix + mount -o bind /dest-nix /nix # Provide a writable /nix. + chown alice /nix + su - alice -c '_NIX_INSTALLER_TEST=1 ./nix-*/install' + su - alice -c 'nix-store --verify' + su - alice -c 'PAGER= nix-store -qR ${self.hydraJobs.build.x86_64-linux}' + + # Check whether 'nix upgrade-nix' works. + cat > /tmp/paths.nix < $sourceRoot/.dist-files - cat $sourceRoot/.dist-files - ''; - - preConfigure = '' - (cd perl ; autoreconf --install --force --verbose) - # TeX needs a writable font cache. - export VARTEXFONTS=$TMPDIR/texfonts - ''; - - distPhase = - '' - runHook preDist - make dist - mkdir -p $out/tarballs - cp *.tar.* $out/tarballs - ''; - - preDist = '' - make install docdir=$out/share/doc/nix makefiles=doc/manual/local.mk - echo "doc manual $out/share/doc/nix/manual" >> $out/nix-support/hydra-build-products - ''; - }; - - - build = pkgs.lib.genAttrs systems (system: - - let pkgs = import nixpkgs { inherit system; }; in - - with pkgs; - - with import ./release-common.nix { inherit pkgs; }; - - releaseTools.nixBuild { - name = "nix"; - src = tarball; - - buildInputs = buildDeps; - - preConfigure = - # Copy libboost_context so we don't get all of Boost in our closure. 
- # https://github.com/NixOS/nixpkgs/issues/45462 - '' - mkdir -p $out/lib - cp -pd ${boost}/lib/{libboost_context*,libboost_thread*,libboost_system*} $out/lib - rm -f $out/lib/*.a - ${lib.optionalString stdenv.isLinux '' - chmod u+w $out/lib/*.so.* - patchelf --set-rpath $out/lib:${stdenv.cc.cc.lib}/lib $out/lib/libboost_thread.so.* - ''} - ''; - - configureFlags = configureFlags ++ - [ "--sysconfdir=/etc" ]; - - enableParallelBuilding = true; - - makeFlags = "profiledir=$(out)/etc/profile.d"; - - installFlags = "sysconfdir=$(out)/etc"; - - doInstallCheck = true; - installCheckFlags = "sysconfdir=$(out)/etc"; - }); - - - perlBindings = pkgs.lib.genAttrs systems (system: - - let pkgs = import nixpkgs { inherit system; }; in with pkgs; - - releaseTools.nixBuild { - name = "nix-perl"; - src = tarball; - - buildInputs = - [ jobs.build.${system} curl bzip2 xz pkgconfig pkgs.perl boost ] - ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium; - - configureFlags = '' - --with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix} - --with-dbd-sqlite=${perlPackages.DBDSQLite}/${pkgs.perl.libPrefix} - ''; - - enableParallelBuilding = true; - - postUnpack = "sourceRoot=$sourceRoot/perl"; - }); - - - binaryTarball = pkgs.lib.genAttrs systems (system: - - with import nixpkgs { inherit system; }; - - let - toplevel = builtins.getAttr system jobs.build; - version = toplevel.src.version; - installerClosureInfo = closureInfo { rootPaths = [ toplevel cacert ]; }; - in - - runCommand "nix-binary-tarball-${version}" - { nativeBuildInputs = lib.optional (system != "aarch64-linux") shellcheck; - meta.description = "Distribution-independent Nix bootstrap binaries for ${system}"; - } - '' - cp ${installerClosureInfo}/registration $TMPDIR/reginfo - substitute ${./scripts/install-nix-from-closure.sh} $TMPDIR/install \ - --subst-var-by nix ${toplevel} \ - --subst-var-by cacert ${cacert} - - substitute ${./scripts/install-darwin-multi-user.sh} $TMPDIR/install-darwin-multi-user.sh \ - --subst-var-by nix ${toplevel} \ - --subst-var-by cacert ${cacert} - substitute ${./scripts/install-systemd-multi-user.sh} $TMPDIR/install-systemd-multi-user.sh \ - --subst-var-by nix ${toplevel} \ - --subst-var-by cacert ${cacert} - substitute ${./scripts/install-multi-user.sh} $TMPDIR/install-multi-user \ - --subst-var-by nix ${toplevel} \ - --subst-var-by cacert ${cacert} - - if type -p shellcheck; then - # SC1090: Don't worry about not being able to find - # $nix/etc/profile.d/nix.sh - shellcheck --exclude SC1090 $TMPDIR/install - shellcheck $TMPDIR/install-darwin-multi-user.sh - shellcheck $TMPDIR/install-systemd-multi-user.sh - - # SC1091: Don't panic about not being able to source - # /etc/profile - # SC2002: Ignore "useless cat" "error", when loading - # .reginfo, as the cat is a much cleaner - # implementation, even though it is "useless" - # SC2116: Allow ROOT_HOME=$(echo ~root) for resolving - # root's home directory - shellcheck --external-sources \ - --exclude SC1091,SC2002,SC2116 $TMPDIR/install-multi-user - fi - - chmod +x $TMPDIR/install - chmod +x $TMPDIR/install-darwin-multi-user.sh - chmod +x $TMPDIR/install-systemd-multi-user.sh - chmod +x $TMPDIR/install-multi-user - dir=nix-${version}-${system} - fn=$out/$dir.tar.xz - mkdir -p $out/nix-support - echo "file binary-dist $fn" >> $out/nix-support/hydra-build-products - tar cvfJ $fn \ - --owner=0 --group=0 --mode=u+rw,uga+r \ - --absolute-names \ - --hard-dereference \ - --transform "s,$TMPDIR/install,$dir/install," \ - --transform "s,$TMPDIR/reginfo,$dir/.reginfo," \ 
- --transform "s,$NIX_STORE,$dir/store,S" \ - $TMPDIR/install $TMPDIR/install-darwin-multi-user.sh \ - $TMPDIR/install-systemd-multi-user.sh \ - $TMPDIR/install-multi-user $TMPDIR/reginfo \ - $(cat ${installerClosureInfo}/store-paths) - ''); - - - coverage = - with pkgs; - - with import ./release-common.nix { inherit pkgs; }; - - releaseTools.coverageAnalysis { - name = "nix-build"; - src = tarball; - - buildInputs = buildDeps; - - dontInstall = false; - - doInstallCheck = true; - - lcovFilter = [ "*/boost/*" "*-tab.*" "*/nlohmann/*" "*/linenoise/*" ]; - - # We call `dot', and even though we just use it to - # syntax-check generated dot files, it still requires some - # fonts. So provide those. - FONTCONFIG_FILE = texFunctions.fontsConf; - }; - - - #rpm_fedora27x86_64 = makeRPM_x86_64 (diskImageFunsFun: diskImageFunsFun.fedora27x86_64) [ ]; - - - #deb_debian8i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.debian8i386) [ "libsodium-dev" ] [ "libsodium13" ]; - #deb_debian8x86_64 = makeDeb_x86_64 (diskImageFunsFun: diskImageFunsFun.debian8x86_64) [ "libsodium-dev" ] [ "libsodium13" ]; - - #deb_ubuntu1710i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1710i386) [ ] [ "libsodium18" ]; - #deb_ubuntu1710x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1710x86_64) [ ] [ "libsodium18" "libboost-context1.62.0" ]; - - - # System tests. - tests.remoteBuilds = (import ./tests/remote-builds.nix rec { - inherit nixpkgs; - nix = build.x86_64-linux; system = "x86_64-linux"; - }); - - tests.nix-copy-closure = (import ./tests/nix-copy-closure.nix rec { - inherit nixpkgs; - nix = build.x86_64-linux; system = "x86_64-linux"; - }); - - tests.githubFlakes = (import ./tests/github-flakes.nix rec { - inherit nixpkgs; - nix = build.x86_64-linux; system = "x86_64-linux"; - }); - - tests.setuid = pkgs.lib.genAttrs - ["i686-linux" "x86_64-linux"] - (system: - import ./tests/setuid.nix rec { - inherit nixpkgs; - nix = build.${system}; inherit system; - }); - - tests.binaryTarball = - with import nixpkgs { system = "x86_64-linux"; }; - vmTools.runInLinuxImage (runCommand "nix-binary-tarball-test" - { diskImage = vmTools.diskImages.ubuntu1204x86_64; - } - '' - set -x - useradd -m alice - su - alice -c 'tar xf ${binaryTarball.x86_64-linux}/*.tar.*' - mkdir /dest-nix - mount -o bind /dest-nix /nix # Provide a writable /nix. - chown alice /nix - su - alice -c '_NIX_INSTALLER_TEST=1 ./nix-*/install' - su - alice -c 'nix-store --verify' - su - alice -c 'PAGER= nix-store -qR ${build.x86_64-linux}' - - # Check whether 'nix upgrade-nix' works. - cat > /tmp/paths.nix <> $out/nix-support/hydra-build-products - ''; - - - # Aggregate job containing the release-critical jobs. 
- release = pkgs.releaseTools.aggregate { - name = "nix-${tarball.version}"; - meta.description = "Release-critical builds"; - constituents = - [ tarball - build.i686-linux - build.x86_64-darwin - build.x86_64-linux - build.aarch64-linux - binaryTarball.i686-linux - binaryTarball.x86_64-darwin - binaryTarball.x86_64-linux - binaryTarball.aarch64-linux - tests.remoteBuilds - tests.nix-copy-closure - tests.binaryTarball - tests.evalNixpkgs - tests.evalNixOS - installerScript - ]; - }; - - }; - - - makeRPM_i686 = makeRPM "i686-linux"; - makeRPM_x86_64 = makeRPM "x86_64-linux"; - - makeRPM = - system: diskImageFun: extraPackages: - - with import nixpkgs { inherit system; }; - - releaseTools.rpmBuild rec { - name = "nix-rpm"; - src = jobs.tarball; - diskImage = (diskImageFun vmTools.diskImageFuns) - { extraPackages = - [ "sqlite" "sqlite-devel" "bzip2-devel" "libcurl-devel" "openssl-devel" "xz-devel" "libseccomp-devel" "libsodium-devel" "boost-devel" "bison" "flex" ] - ++ extraPackages; }; - # At most 2047MB can be simulated in qemu-system-i386 - memSize = 2047; - meta.schedulingPriority = 50; - postRPMInstall = "cd /tmp/rpmout/BUILD/nix-* && make installcheck"; - #enableParallelBuilding = true; - }; - - - makeDeb_i686 = makeDeb "i686-linux"; - makeDeb_x86_64 = makeDeb "x86_64-linux"; - - makeDeb = - system: diskImageFun: extraPackages: extraDebPackages: - - with import nixpkgs { inherit system; }; - - releaseTools.debBuild { - name = "nix-deb"; - src = jobs.tarball; - diskImage = (diskImageFun vmTools.diskImageFuns) - { extraPackages = - [ "libsqlite3-dev" "libbz2-dev" "libcurl-dev" "libcurl3-nss" "libssl-dev" "liblzma-dev" "libseccomp-dev" "libsodium-dev" "libboost-all-dev" ] - ++ extraPackages; }; - memSize = 2047; - meta.schedulingPriority = 50; - postInstall = "make installcheck"; - configureFlags = "--sysconfdir=/etc"; - debRequires = - [ "curl" "libsqlite3-0" "libbz2-1.0" "bzip2" "xz-utils" "libssl1.0.0" "liblzma5" "libseccomp2" ] - ++ extraDebPackages; - debMaintainer = "Eelco Dolstra "; - doInstallCheck = true; - #enableParallelBuilding = true; - }; - - -in jobs diff --git a/shell.nix b/shell.nix deleted file mode 100644 index 34abf05ea..000000000 --- a/shell.nix +++ /dev/null @@ -1,28 +0,0 @@ -{ useClang ? true -, nixpkgs ? builtins.fetchTarball https://github.com/NixOS/nixpkgs-channels/archive/nixos-19.03.tar.gz -}: - -with import nixpkgs { system = builtins.currentSystem or "x86_64-linux"; }; - -with import ./release-common.nix { inherit pkgs; }; - -(if useClang then clangStdenv else stdenv).mkDerivation { - name = "nix"; - - buildInputs = buildDeps ++ tarballDeps ++ perlDeps; - - inherit configureFlags; - - enableParallelBuilding = true; - - installFlags = "sysconfdir=$(out)/etc"; - - shellHook = - '' - export prefix=$(pwd)/inst - configureFlags+=" --prefix=$prefix" - PKG_CONFIG_PATH=$prefix/lib/pkgconfig:$PKG_CONFIG_PATH - PATH=$prefix/bin:$PATH - unset PYTHONPATH - ''; -} diff --git a/tests/github-flakes.nix b/tests/github-flakes.nix index 2345972ce..7211360cd 100644 --- a/tests/github-flakes.nix +++ b/tests/github-flakes.nix @@ -1,6 +1,9 @@ -{ nixpkgs, system, nix }: +{ nixpkgs, system, overlay }: -with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; }; +with import (nixpkgs + "/nixos/lib/testing.nix") { + inherit system; + extraConfigurations = [ { nixpkgs.overlays = [ overlay ]; } ]; +}; let @@ -101,7 +104,6 @@ makeTest ( { config, pkgs, nodes, ... 
}: { virtualisation.writableStore = true; virtualisation.pathsInNixDB = [ pkgs.hello pkgs.fuse ]; - nix.package = nix; nix.binaryCaches = [ ]; environment.systemPackages = [ pkgs.jq ]; networking.hosts.${(builtins.head nodes.github.config.networking.interfaces.eth1.ipv4.addresses).address} = @@ -135,11 +137,11 @@ makeTest ( my $date = $client->succeed("nix flake info nixpkgs --json | jq -M .lastModified"); strftime("%Y%m%d%H%M%S", gmtime($date)) eq "${nixpkgs.lastModified}" or die "time mismatch"; - $client->succeed("nix build nixpkgs:hello"); + $client->succeed("nix build nixpkgs#hello"); # The build shouldn't fail even with --tarball-ttl 0 (the server # being down should not be a fatal error). - $client->succeed("nix build nixpkgs:fuse --tarball-ttl 0"); + $client->succeed("nix build nixpkgs#fuse --tarball-ttl 0"); ''; }) diff --git a/tests/nix-copy-closure.nix b/tests/nix-copy-closure.nix index 0dc147fb3..7c4bdaded 100644 --- a/tests/nix-copy-closure.nix +++ b/tests/nix-copy-closure.nix @@ -1,8 +1,11 @@ # Test ‘nix-copy-closure’. -{ nixpkgs, system, nix }: +{ nixpkgs, system, overlay }: -with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; }; +with import (nixpkgs + "/nixos/lib/testing.nix") { + inherit system; + extraConfigurations = [ { nixpkgs.overlays = [ overlay ]; } ]; +}; makeTest (let pkgA = pkgs.cowsay; pkgB = pkgs.wget; pkgC = pkgs.hello; in { @@ -11,7 +14,6 @@ makeTest (let pkgA = pkgs.cowsay; pkgB = pkgs.wget; pkgC = pkgs.hello; in { { config, pkgs, ... }: { virtualisation.writableStore = true; virtualisation.pathsInNixDB = [ pkgA ]; - nix.package = nix; nix.binaryCaches = [ ]; }; @@ -20,7 +22,6 @@ makeTest (let pkgA = pkgs.cowsay; pkgB = pkgs.wget; pkgC = pkgs.hello; in { { services.openssh.enable = true; virtualisation.writableStore = true; virtualisation.pathsInNixDB = [ pkgB pkgC ]; - nix.package = nix; }; }; diff --git a/tests/remote-builds.nix b/tests/remote-builds.nix index b867f13b4..a53f9bfcd 100644 --- a/tests/remote-builds.nix +++ b/tests/remote-builds.nix @@ -1,8 +1,11 @@ # Test Nix's remote build feature. -{ nixpkgs, system, nix }: +{ nixpkgs, system, overlay }: -with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; }; +with import (nixpkgs + "/nixos/lib/testing.nix") { + inherit system; + extraConfigurations = [ { nixpkgs.overlays = [ overlay ]; } ]; +}; makeTest ( @@ -13,7 +16,6 @@ let { config, pkgs, ... }: { services.openssh.enable = true; virtualisation.writableStore = true; - nix.package = nix; nix.useSandbox = true; }; @@ -59,7 +61,6 @@ in ]; virtualisation.writableStore = true; virtualisation.pathsInNixDB = [ config.system.build.extraUtils ]; - nix.package = nix; nix.binaryCaches = [ ]; programs.ssh.extraConfig = "ConnectTimeout 30"; }; diff --git a/tests/setuid.nix b/tests/setuid.nix index 77e83c8d6..d79d071df 100644 --- a/tests/setuid.nix +++ b/tests/setuid.nix @@ -1,15 +1,17 @@ # Verify that Linux builds cannot create setuid or setgid binaries. -{ nixpkgs, system, nix }: +{ nixpkgs, system, overlay }: -with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; }; +with import (nixpkgs + "/nixos/lib/testing.nix") { + inherit system; + extraConfigurations = [ { nixpkgs.overlays = [ overlay ]; } ]; +}; makeTest { machine = { config, lib, pkgs, ... 
}: { virtualisation.writableStore = true; - nix.package = nix; nix.binaryCaches = [ ]; nix.nixPath = [ "nixpkgs=${lib.cleanSource pkgs.path}" ]; virtualisation.pathsInNixDB = [ pkgs.stdenv pkgs.pkgsi686Linux.stdenv ]; From 21e2088c1b6f5458f31fd368f416d45e4eb2bc5f Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 4 Oct 2019 17:25:59 +0200 Subject: [PATCH 284/634] nix-perl -> nix.perl-bindings --- flake.nix | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/flake.nix b/flake.nix index 220b6d8c5..0f8da30e2 100644 --- a/flake.nix +++ b/flake.nix @@ -93,11 +93,11 @@ in { - # A Nixpkgs overlay that overrides the 'nix' and 'nix-perl' - # packages. + # A Nixpkgs overlay that overrides the 'nix' and + # 'nix.perl-bindings' packages. overlay = final: prev: { - nix = with final; with commonDeps pkgs; releaseTools.nixBuild { + nix = with final; with commonDeps pkgs; (releaseTools.nixBuild { name = "nix"; src = self.hydraJobs.tarball; @@ -127,24 +127,26 @@ doInstallCheck = true; installCheckFlags = "sysconfdir=$(out)/etc"; - }; + }) // { - nix-perl = with final; releaseTools.nixBuild { - name = "nix-perl"; - src = self.hydraJobs.tarball; + perl-bindings = with final; releaseTools.nixBuild { + name = "nix-perl"; + src = self.hydraJobs.tarball; - buildInputs = - [ nix curl bzip2 xz pkgconfig pkgs.perl boost ] - ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium; + buildInputs = + [ nix curl bzip2 xz pkgconfig pkgs.perl boost ] + ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium; - configureFlags = '' - --with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix} - --with-dbd-sqlite=${perlPackages.DBDSQLite}/${pkgs.perl.libPrefix} - ''; + configureFlags = '' + --with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix} + --with-dbd-sqlite=${perlPackages.DBDSQLite}/${pkgs.perl.libPrefix} + ''; - enableParallelBuilding = true; + enableParallelBuilding = true; + + postUnpack = "sourceRoot=$sourceRoot/perl"; + }; - postUnpack = "sourceRoot=$sourceRoot/perl"; }; }; @@ -197,7 +199,7 @@ build = nixpkgs.lib.genAttrs systems (system: nixpkgsFor.${system}.nix); # Perl bindings for various platforms. - perlBindings = nixpkgs.lib.genAttrs systems (system: nixpkgsFor.${system}.nix-perl); + perlBindings = nixpkgs.lib.genAttrs systems (system: nixpkgsFor.${system}.nix.perl-bindings); # Binary tarball for various platforms, containing a Nix store # with the closure of 'nix' package, and the second half of @@ -429,7 +431,7 @@ }; packages = { - inherit (nixpkgsFor.x86_64-linux) nix nix-perl; + inherit (nixpkgsFor.x86_64-linux) nix; }; defaultPackage = self.packages.nix; From ce2c755d2aa4d970c2de8f00f9ed2099ad991255 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 7 Oct 2019 14:02:52 +0200 Subject: [PATCH 285/634] Add a "dev" output to the 'nix' package This is to ensure that references like 'nix.dev' in dwarffs work regardless of whether we're using the 'nix' package from this overlay or from Nixpkgs. 
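For illustration, a downstream derivation along the lines of dwarffs — sketched here in callPackage style with assumed argument names, not the actual dwarffs sources — can then refer to the headers through the 'dev' output, and the reference resolves the same way whether 'nix' comes from this overlay or from Nixpkgs:

```nix
# Hypothetical consumer, sketched in callPackage style; 'nix' here is the
# package built by this overlay (or the one provided by Nixpkgs).
{ stdenv, fuse, boost, nlohmann_json, nix }:

stdenv.mkDerivation {
  name = "dwarffs";
  src = ./.;

  buildInputs = [ fuse nix nlohmann_json boost ];

  # 'nix.dev' selects the new "dev" output, which carries the C++ headers.
  NIX_CFLAGS_COMPILE =
    "-I ${nix.dev}/include/nix -include ${nix.dev}/include/nix/config.h -D_FILE_OFFSET_BITS=64";
}
```

Without a separate "dev" output in the overlay, `${nix.dev}` would fail to evaluate there even though it works against the Nixpkgs package; adding the output keeps both sources interchangeable.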
--- flake.nix | 2 ++ 1 file changed, 2 insertions(+) diff --git a/flake.nix b/flake.nix index 0f8da30e2..4ec80e9d6 100644 --- a/flake.nix +++ b/flake.nix @@ -101,6 +101,8 @@ name = "nix"; src = self.hydraJobs.tarball; + outputs = [ "out" "dev" ]; + buildInputs = buildDeps; preConfigure = From a15f9b37eba805c2b3bd37844c0a527b14774bba Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 7 Oct 2019 15:44:32 +0200 Subject: [PATCH 286/634] fetchGit: Support Git trees without any commits Fixes $ nix build fatal: bad revision 'HEAD' error: program 'git' failed with exit code 128 on a new flake. It is now detected as a dirty tree with revCount = 0. --- src/libexpr/primops/fetchGit.cc | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index 64f138195..21fa025c1 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -33,13 +33,19 @@ GitInfo exportGit(ref store, std::string uri, // or revision is given, then allow the use of an unclean working // tree. if (!ref && !rev && isLocal) { - bool clean = true; + bool clean = false; + + /* Check whether this repo has any commits. There are + probably better ways to do this. */ + bool haveCommits = !readDirectory(uri + "/.git/refs/heads").empty(); try { - runProgram("git", true, { "-C", uri, "diff-index", "--quiet", "HEAD", "--" }); + if (haveCommits) { + runProgram("git", true, { "-C", uri, "diff-index", "--quiet", "HEAD", "--" }); + clean = true; + } } catch (ExecError & e) { if (!WIFEXITED(e.status) || WEXITSTATUS(e.status) != 1) throw; - clean = false; } if (!clean) { @@ -75,10 +81,10 @@ GitInfo exportGit(ref store, std::string uri, }; gitInfo.storePath = store->addToStore("source", uri, true, htSHA256, filter); - gitInfo.revCount = std::stoull(runProgram("git", true, { "-C", uri, "rev-list", "--count", "HEAD" })); + gitInfo.revCount = haveCommits ? std::stoull(runProgram("git", true, { "-C", uri, "rev-list", "--count", "HEAD" })) : 0; // FIXME: maybe we should use the timestamp of the last // modified dirty file? - gitInfo.lastModified = std::stoull(runProgram("git", true, { "-C", uri, "show", "-s", "--format=%ct", "HEAD" })); + gitInfo.lastModified = haveCommits ? 
std::stoull(runProgram("git", true, { "-C", uri, "show", "-s", "--format=%ct", "HEAD" })) : 0; return gitInfo; } From d24bfe29a1edd30900f9205b9446252f3f3836af Mon Sep 17 00:00:00 2001 From: Emilio Karakey Date: Mon, 7 Oct 2019 18:28:10 -0500 Subject: [PATCH 287/634] deleted comment --- src/nix/shell.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/src/nix/shell.cc b/src/nix/shell.cc index 5e1d53aff..a4488b229 100644 --- a/src/nix/shell.cc +++ b/src/nix/shell.cc @@ -30,7 +30,6 @@ BuildEnvironment readEnvironment(const Path & path) std::set exported; auto file = readFile(path); - //auto file = readFile("/tmp/x"); auto pos = file.cbegin(); From 519aa479d7379981a3ffd914d734c33ee9610efd Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 8 Oct 2019 14:41:41 +0200 Subject: [PATCH 288/634] Remove outdated flakes design doc --- doc/flakes/design.md | 482 ------------------------------------------- 1 file changed, 482 deletions(-) delete mode 100644 doc/flakes/design.md diff --git a/doc/flakes/design.md b/doc/flakes/design.md deleted file mode 100644 index 97bd12ce3..000000000 --- a/doc/flakes/design.md +++ /dev/null @@ -1,482 +0,0 @@ -# Nix Flake MVP - -## Goals - -* Standard and easy way for Nix repos to reference other Nix repos as - dependencies - -* Discoverability: Be able to query and update these references to Nix repos - automatically - -* To provide a replacement for `nix-channel`, `NIX_PATH` and Hydra jobset - definitions - -* Reproducibility: Evaluate packages and NixOS configurations hermetic by - default - -Upcoming but not yet implemented: - -* Sophisticated flake versioning, such as the ability to specify version ranges - on dependencies. - -* A way to specify the types of values provided by a flake. For the most part, - flakes can provide arbitrary Nix values, but there will be some standard - attribute names (e.g. `packages` must be a set of installable derivations). - - -## Overview - -* A flake is (usually) a Git repository that contains a file named `flake.nix` - at top-level - -* A flake *provides* an attribute set of values, such as packages, Nixpkgs - overlays, NixOS modules, library functions, Hydra jobs, `nix-shell` - definitions, etc. - -* Flakes can *depend* on other flakes or other repositories which aren't flakes - -* Flakes are referred to using a *flake reference*, which is either a URL - specifying its repository's location or an identifier looked up in a *lock - file* or *flake registry*. - -* A *flake registry* is a mapping from flake identifiers to flake locations - (e.g. `nixpkgs -> github:NixOS/nixpkgs/release-18.09`). There is a centrally - maintained flake registry on `nixos.org`. - -* A flake can contain a *lock file* (`flake.lock`) used when resolving the - dependencies in `flake.nix`. It maps mutable flake references - (e.g. `github:NixOS/nixpkgs/release-18.09`) to references containing revisions - (e.g. `nixpkgs -> - github:NixOS/nixpkgs/98a2a5b5370c1e2092d09cb38b9dcff6d98a109f`). - -* The `nix` command uses the flake registry as its default installation source. - For example, `nix build nixpkgs.hello` builds the `hello` package provided by - the `nixpkgs` flake listed in the registry. `nix` will automatically - download/upload the registry and flakes as needed. - -* `nix build` without arguments will build the flake in the current - directory (or some parent). - -* `nix flake update` generates `flake.lock` from `flake.nix`, ignoring the old - lockfile. - -* `nixos-rebuild` will build a configuration from a (locked) flake. 
Evaluation - is done in pure mode to ensure there are no unaccounted inputs. Thus the - NixOS configuration can be reproduced unambiguously from the top-level flake. - -* Nix code can query flake metadata such as `commitHash` (the Git revision) or - `edition` (the date of the last commit). This is useful for NixOS to compute - the NixOS version string (which will be the revision of the top-level - configuration flake, uniquely identifying the configuration). - -* Hydra jobset configurations will consist of a single flake reference. Thus we - can get rid of jobset inputs; any other needed repositories can be fetched by - the top-level flake. The top-level flake can be locked or unlocked; if some - dependencies are unlocked, then Nix will fetch the latest revision for each. - - -## Example flake - -Let us look at an example of a `flake.nix` file, here for `dwarffs`, a small -repository that provides a single package and a single NixOS module. - -```nix -{ - # The flake identifier. - name = "dwarffs"; - - # The edition may be used in the future to determine how Nix - # expressions inside this flake are to be parsed. - edition = 201906; - - # Some other metadata. - description = "A filesystem that fetches DWARF debug info from the Internet on demand"; - - # The flake dependencies. Nix will resolve and fetch these flakes and pass - # them as a function argument to `outputs` below. - # - # "nixpkgs" denotes a flake named `nixpkgs` which is looked up - # in the flake registry, or in `flake.lock` inside this flake, if it - # exists. - inputs = [ flake:nixpkgs ]; - - # An attribute set listing dependencies which aren't flakes, also to be passed as - # a function argument to `provides`. - nonFlakeRequires = {}; - - # The stuff provided by this flake. Flakes can provide whatever they - # want (convention over configuration), but some attributes have - # special meaning to tools / other flakes. For example, `packages` - # is used by the `nix` CLI to search for packages, and - # `nixosModules` is used by NixOS to automatically pull in the - # modules provided by a flake. - # - # `outputs` takes a single argument (`deps`) that contains - # the resolved set of flakes. (See below.) - outputs = deps: { - - # This is searched by `nix`, so something like `nix install - # dwarffs.dwarffs` resolves to this `packages.dwarffs`. - packages.dwarffs = - with deps.nixpkgs.packages; - with deps.nixpkgs.builders; - with deps.nixpkgs.lib; - - stdenv.mkDerivation { - name = "dwarffs-0.1"; - - buildInputs = [ fuse nix nlohmann_json boost ]; - - NIX_CFLAGS_COMPILE = "-I ${nix.dev}/include/nix -include ${nix.dev}/include/nix/config.h -D_FILE_OFFSET_BITS=64"; - - src = cleanSource ./.; - - installPhase = - '' - mkdir -p $out/bin $out/lib/systemd/system - - cp dwarffs $out/bin/ - ln -s dwarffs $out/bin/mount.fuse.dwarffs - - cp ${./run-dwarffs.mount} $out/lib/systemd/system/run-dwarffs.mount - cp ${./run-dwarffs.automount} $out/lib/systemd/system/run-dwarffs.automount - ''; - }; - - # NixOS modules. - nixosModules.dwarffs = import ./module.nix deps; - - # Provide a single Hydra job (`hydraJobs.dwarffs`). - hydraJobs.build.x86_64-linux = packages.dwarffs; - - # A bunch of things which can be checked (through `nix flake check`) to - # make sure the flake is well-defined. 
- checks.build = packages.dwarffs; - }; -} -``` - -Similarly, a minimal `flake.nix` for Nixpkgs: - -```nix -{ - name = "nixpkgs"; - - edition = 201906; - - description = "A collection of packages for the Nix package manager"; - - outputs = deps: - let pkgs = import ./. {}; in - let pkgs = import ./. { system = "x86_64-linux"; }; in - { - lib = (import ./lib) // { - nixosSystem = import ./nixos/lib/eval-config.nix; - }; - - builders = { - inherit (pkgs) stdenv fetchurl; - }; - - packages = { - inherit (pkgs) hello nix fuse nlohmann_json boost; - }; - - legacyPkgs = pkgs; - }; -} -``` -Note that `packages` is an unpolluted set of packages: non-package values like -`lib` or `fetchurl` are not part of it. - -## Flake registries - -Note: If a flake registry contains an entry `nixpkgs -> github:NixOS/nixpkgs`, -then `nixpkgs/release-18.09` will match to become -`github:NixOS/nixpkgs/release-18.09`. This is referred to as "fuzzymatching". - - -## Flake references - -Flake references are a URI-like syntax to specify the physical location of a -flake (e.g. a Git repository) or to denote a lookup in the flake registry or -lock file. There are four options for the syntax: - -* Flake aliases - A flake alias is a name which requires a lookup in a flake - registry or lock file. - - Example: "nixpkgs" - -* GitHub repositories - A repository which is stored on GitHub can easily be fetched using this type. - Note: - * Only the code in this particular commit is downloaded, not the entire repo - * By default, the commit to download is the last commit on the `master` branch. - See later for how to change this. - - Example: `github:NixOS/nixpkgs` - -* `ssh/https/git/file` - These are generic `FlakeRef`s for downloadding git repositories or tarballs. - - Examples: - - https://example.org/my/repo.git - - ssh://git@github.com:NixOS/nix.git - - git://github.com/edolstra/dwarffs.git - - file:///home/my-user/some-repo/some-repo.git - - https://releases.nixos.org/nixos/unstable/nixos-19.03pre167858.f2a1a4e93be/nixexprs.tar.xz - - file:///.tar.xz - -* Local, dirty paths - This `FlakeRef` is the equivalent of `file://` used for dirty paths. - - Example: /path/to/my/repo - -Notes: -- Each FlakeRef (except for the Path option) allows for a Git revision (i.e. - commit hash) and/or referenceo(i.e. git branch name) to be added. For - tarbals, an SRI hash needs to be added. - Examples: - * `"nixpkgs/release-18.09"` - * `github:NixOS/nixpkgs/1e9e709953e315ab004951248b186ac8e2306451` - * `git://github.com/edolstra/dwarffs.git?ref=flake&rev=2efca4bc9da70fb001b26c3dc858c6397d3c4817` - * file:///.tar.xz(?hash=) -- In full pure mode, no mutable `FlakeRef`s can be used - * No aliases, because they need to be looked up - * `github` requires a specified `rev` - * `ssh/https/git/file` require a specified `ref` _and_ `rev` - * `path` is always mutable -- Flakes don't need to be top-level, but can also reside in a subdirectory. This is shown by adding `dir=` to the `FlakeRef`. - Example: `./foo?dir=bar` - - -## Flake lock files - -A lockfile is a JSON file named `flake.lock` which contains a forrest of -entries mapping `FlakeRef`s to the immutable `FlakeRef` they were resolved to. 
- -Example: - -```json -{ - "nixpkgs": { - "uri": "github:NixOS/nixpkgs/41c0c1bf292ea3ac3858ff393b49ca1123dbd553", - "content-hash": "sha256-vy2UmXQM66aS/Kn2tCtjt9RwxfBvV+nQVb5tJQFwi8E=" - }, - "foo": { - "uri": "https://example.org/foo.tar.xz?hash=sha256-56bbc099995ea8581ead78f22832fee7dbcb0a0b6319293d8c2d0aef5379397c", - "content-hash": "sha256-vy2UmXQM66aS/Kn2tCtjt9RwxfBvV+nQVb5tJQFwi8E=" - } -} -``` - -Lockfiles are used to help resolve the dependencies of a flake. -- `nix build github:<..>` uses the remote lockfile and update it -- `nix build /home/user/dwarffs` uses the local lockfile, updates it and writes the result to file -- `nix flake update ` recreates the lockfile from scratch and writes it to file -- `--no-registries` makes the command pure, also when fetching dependencies -- `--no-save-lock-file`: Several commands will update the lockfile (e.g. `nix - build`). This flag prevents the updated lockfile to be written to file. -- `--recreate-lock-file` makes prevents the current lockfile from being used - -## `outputs` - -The function argument `deps` is an attrset containing all dependencies listed -in `requires` and `nonFlakeRequires` as well as `path` (for the flake's source -code) and an attribute `meta` with: -- `description` -- `commitHash` (not for tarball flakes): The Git commit hash. -- `date`: The timestamp of the most recent commit (for Git repos), or of the - most recently modified file (for tarballs) -- `revCount` (for Git flakes, but not GitHub flakes): The number of ancestors - of the revision. Useful for generating version strings. - -The flake attribute `outputs` is a function that takes an argument named `deps` -and returns an attribute set. Some of the members of this set have protected -names: - -* `packages`: A set of installable derivations used by the `nix` command. That - is, commands such as `nix install` ignore all other flake attributes. It - cannot be a nested set. - -* `hydraJobs`: Used by Hydra. - -* `nixosModules`: An attrset of NixOS modules. - -* `nixosSystems`: An attrset of calls to `evalModules`, i.e. things - that `nixos-rebuild` can switch to. (Maybe this is superfluous, but - we need to avoid a situation where `nixos-rebuild` needs to fetch - its own `nixpkgs` just to do `evalModules`.) - -* `devShell`: A derivation to create a development environment - -* `self`: The result of the flake's output which is passed to itself - Example: `self.outputs.foo` works. - - -## Flake registry - -A flake registry is a JSON file mapping flake references to flake references. -The default/global registry is kept at -`https://github.com/NixOS/flake-registry/blob/master/flake-registry.json` and -looks like this: - -```json -{ - "flakes": { - "dwarffs": { - "uri": "github:edolstra/dwarffs/flake" - }, - "nix": { - "uri": "github:NixOS/nix/flakes" - }, - "nixpkgs": { - "uri": "github:edolstra/nixpkgs/release-19.03" - }, - "hydra": { - "uri": "github:NixOS/hydra/flake" - }, - "patchelf": { - "uri": "github:NixOS/patchelf" - } - }, - "version": 1 -} -``` - -Nix automatically (re)downloads this file whenever you have network access. The -downloaded file is a GC root so the registry remains available if nixos.org is -unreachable. - -In addition to a global registry, there is also a user registry stored in -`~/.config/nix/registry.json`. - - -## Nix UI - -There is a list of new commands added to the `nix` CLI: - -* `nix flake list`: Show all flakes in the registry - -* `nix flake add `: Add or override a flake - to/in the user flake registry. 
- -* `nix flake remove `: Remove a FlakeRef from the user flake - registry. - -* `nix flake pin `: Look up to which immutable FlakeRef the - alias FlakeRef maps to currently, and store that map in the user registry. - Example: `nix flake pin github:NixOS/nixpkgs` will create an entry - `github:NixOS/nixpkgs -> - github:NixOS/nixpkgs/444f22ca892a873f76acd88d5d55bdc24ed08757`. - -* `nix flake init`: Create a `flake.nix` in the current directory - -* `nix flake update`: Recreate the lock file from scratch, from the `flake.nix`. - -* `nix flake check`: Do some checks on the flake, e.g. check that all - `packages` are really packages. - -* `nix flake clone`: `git clone` the flake repo - -Flags / configuration options: - -* `--flakes (=)*`: add/override some - FlakeRef - -* `--flake `: set the specified flake as the installation source - E.g. `nix build --flake ./my-nixpkgs hello`. - -The default installation source in `nix` is the `packages` from all flakes in -the registry, that is: -``` -builtins.mapAttrs (flakeName: flakeInfo: - (getFlake flakeInfo.uri).${flakeName}.outputs.packages or {}) - builtins.flakeRegistry -``` -where `builtins.flakeRegistry` is the global registry with user overrides -applied, and `builtins.getFlake` downloads a flake and resolves its -dependencies. - - -## Pure evaluation and caching - -Flake evaluation is done in pure mode. Thus: - -* Flakes cannot use `NIX_PATH` via the `<...>` syntax. - -* Flakes cannot read random stuff from non-flake directories, such as - `~/.nix/config.nix` or overlays. - -This enables aggressive caching or precomputation of Nixpkgs package sets. For -example, for a particular Nixpkgs flake closure (as identified by, say, a hash -of the fully-qualified flake references after dependency resolution) and system -type, an attribute like `packages.hello` should always evaluate to the same -derivation. So we can: - -* Keep a local evaluation cache (say `~/.cache/nix/eval-cache-v1.sqlite`) - mapping `() -> (, - , )`. - -* Download a precomputed cache, e.g. - `https://releases.nixos.org/eval/.sqlite`. So a command - like `nix search` could avoid evaluating Nixpkgs entirely. - -Of course, this doesn't allow overlays. With pure evaluation, the only way to -have these is to define a top-level flake that depends on the Nixpkgs flake and -somehow passes in a set of overlays. - - -## Hydra jobset dependencies - -Hydra can use the flake dependency resolution mechanism to fetch dependencies. -This allows us to get rid of jobset configuration in the web interface: a -jobset only requires a flake reference. That is, a jobset *is* a flake. Hydra -then just builds the `hydraJobs` attrset - - -## NixOS system configuration - -NixOS currently contains a lot of modules that really should be moved into -their own repositories. For example, it contains a Hydra module that duplicates -the one in the Hydra repository. Also, we want reproducible evaluation for -NixOS system configurations. So NixOS system configurations should be stored as -flakes in (local) Git repositories. - -`my-system/flake.nix`: -```nix -{ - name = "my-system"; - - edition = 201906; - - inputs = - [ "nixpkgs/nixos-18.09" - "dwarffs" - "hydra" - ... lots of other module flakes ... - ]; - - outputs = flakes: { - nixosSystems.default = - flakes.nixpkgs.lib.evalModules { - modules = - [ { networking.firewall.enable = true; - hydra.useSubstitutes = true; - } - # The latter could be extracted automatically from `flakes`. 
- flakes.dwarffs.nixosModules.dwarffs - flakes.hydra.nixosModules.hydra - ]; - }; - }; -} -``` - -We can then build the system: -``` -nixos-rebuild switch --flake ~/my-system -``` -This performs dependency resolution starting at `~/my-system/flake.nix` and -builds the `system` attribute in `nixosSystems.default`. From 21304c11f926892b1a0098ba5d424445d5ae30d6 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 8 Oct 2019 16:30:04 +0200 Subject: [PATCH 289/634] uri -> url for consistency --- flake.lock | 4 ++-- flake.nix | 2 +- src/libexpr/flake/flake.cc | 17 ++++++++++++----- src/libexpr/flake/lockfile.cc | 8 ++++---- src/nix/flake.cc | 20 ++++++++++---------- tests/flakes.sh | 22 +++++++++++----------- 6 files changed, 40 insertions(+), 33 deletions(-) diff --git a/flake.lock b/flake.lock index 05e3a6e25..9c11ec4fc 100644 --- a/flake.lock +++ b/flake.lock @@ -3,8 +3,8 @@ "nixpkgs": { "inputs": {}, "narHash": "sha256-ltGlDPfwicH/u4orj1n4JXgRsA+jvKQsGnekObi0TV4=", - "originalUri": "nixpkgs/release-19.03", - "uri": "github:edolstra/nixpkgs/9a593b575e4044f9aff939b512e7cb1cf1e76a65" + "originalUrl": "nixpkgs/release-19.03", + "url": "github:edolstra/nixpkgs/9a593b575e4044f9aff939b512e7cb1cf1e76a65" } }, "version": 3 diff --git a/flake.nix b/flake.nix index 4ec80e9d6..167064d72 100644 --- a/flake.nix +++ b/flake.nix @@ -3,7 +3,7 @@ edition = 201909; - inputs.nixpkgs.uri = "nixpkgs/release-19.03"; + inputs.nixpkgs.url = "nixpkgs/release-19.03"; outputs = { self, nixpkgs }: diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index 9e260263c..e8eb353fb 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -34,8 +34,14 @@ std::shared_ptr readRegistry(const Path & path) throw Error("flake registry '%s' has unsupported version %d", path, version); auto flakes = json["flakes"]; - for (auto i = flakes.begin(); i != flakes.end(); ++i) - registry->entries.emplace(i.key(), FlakeRef(i->value("uri", ""))); + for (auto i = flakes.begin(); i != flakes.end(); ++i) { + // FIXME: remove 'uri' soon. + auto url = i->value("url", i->value("uri", "")); + if (url.empty()) + throw Error("flake registry '%s' lacks a 'url' attribute for entry '%s'", + path, i.key()); + registry->entries.emplace(i.key(), url); + } return registry; } @@ -46,7 +52,7 @@ void writeRegistry(const FlakeRegistry & registry, const Path & path) nlohmann::json json; json["version"] = 1; for (auto elem : registry.entries) - json["flakes"][elem.first.to_string()] = { {"uri", elem.second.to_string()} }; + json["flakes"][elem.first.to_string()] = { {"url", elem.second.to_string()} }; createDirs(dirOf(path)); writeFile(path, json.dump(4)); // The '4' is the number of spaces used in the indentation in the json file. 
} @@ -283,7 +289,8 @@ static Flake getFlake(EvalState & state, const FlakeRef & originalRef, } auto sInputs = state.symbols.create("inputs"); - auto sUri = state.symbols.create("uri"); + auto sUrl = state.symbols.create("url"); + auto sUri = state.symbols.create("uri"); // FIXME: remove soon auto sFlake = state.symbols.create("flake"); if (std::optional inputs = vInfo.attrs->get(sInputs)) { @@ -295,7 +302,7 @@ static Flake getFlake(EvalState & state, const FlakeRef & originalRef, FlakeInput input(FlakeRef(inputAttr.name)); for (Attr attr : *(inputAttr.value->attrs)) { - if (attr.name == sUri) { + if (attr.name == sUrl || attr.name == sUri) { expectType(state, tString, *attr.value, *attr.pos); input.ref = std::string(attr.value->string.s); } else if (attr.name == sFlake) { diff --git a/src/libexpr/flake/lockfile.cc b/src/libexpr/flake/lockfile.cc index 039b7a7c1..2c3f3c93d 100644 --- a/src/libexpr/flake/lockfile.cc +++ b/src/libexpr/flake/lockfile.cc @@ -5,8 +5,8 @@ namespace nix::flake { LockedInput::LockedInput(const nlohmann::json & json) : LockedInputs(json) - , ref(json["uri"]) - , originalRef(json["originalUri"]) + , ref(json.value("url", json.value("uri", ""))) + , originalRef(json.value("originalUrl", json.value("originalUri", ""))) , narHash(Hash((std::string) json["narHash"])) { if (!ref.isImmutable()) @@ -16,8 +16,8 @@ LockedInput::LockedInput(const nlohmann::json & json) nlohmann::json LockedInput::toJson() const { auto json = LockedInputs::toJson(); - json["uri"] = ref.to_string(); - json["originalUri"] = originalRef.to_string(); + json["url"] = ref.to_string(); + json["originalUrl"] = originalRef.to_string(); json["narHash"] = narHash.to_string(SRI); return json; } diff --git a/src/nix/flake.cc b/src/nix/flake.cc index d0135143c..cf4fcf722 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -19,21 +19,21 @@ using namespace nix::flake; class FlakeCommand : virtual Args, public EvalCommand, public MixFlakeOptions { - std::string flakeUri = "."; + std::string flakeUrl = "."; public: FlakeCommand() { - expectArg("flake-uri", &flakeUri, true); + expectArg("flake-url", &flakeUrl, true); } FlakeRef getFlakeRef() { - if (flakeUri.find('/') != std::string::npos || flakeUri == ".") - return FlakeRef(flakeUri, true); + if (flakeUrl.find('/') != std::string::npos || flakeUrl == ".") + return FlakeRef(flakeUrl, true); else - return FlakeRef(flakeUri); + return FlakeRef(flakeUrl); } Flake getFlake() @@ -74,7 +74,7 @@ struct CmdFlakeList : EvalCommand static void printSourceInfo(const SourceInfo & sourceInfo) { - std::cout << fmt("URI: %s\n", sourceInfo.resolvedRef.to_string()); + std::cout << fmt("URL: %s\n", sourceInfo.resolvedRef.to_string()); if (sourceInfo.resolvedRef.ref) std::cout << fmt("Branch: %s\n",*sourceInfo.resolvedRef.ref); if (sourceInfo.resolvedRef.rev) @@ -89,7 +89,7 @@ static void printSourceInfo(const SourceInfo & sourceInfo) static void sourceInfoToJson(const SourceInfo & sourceInfo, nlohmann::json & j) { - j["uri"] = sourceInfo.resolvedRef.to_string(); + j["url"] = sourceInfo.resolvedRef.to_string(); if (sourceInfo.resolvedRef.ref) j["branch"] = *sourceInfo.resolvedRef.ref; if (sourceInfo.resolvedRef.rev) @@ -454,7 +454,7 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON struct CmdFlakeAdd : MixEvalArgs, Command { FlakeUri alias; - FlakeUri uri; + FlakeUri url; std::string description() override { @@ -464,7 +464,7 @@ struct CmdFlakeAdd : MixEvalArgs, Command CmdFlakeAdd() { expectArg("alias", &alias); - expectArg("flake-uri", &uri); + expectArg("flake-url", &url); } 
void run() override @@ -473,7 +473,7 @@ struct CmdFlakeAdd : MixEvalArgs, Command Path userRegistryPath = getUserRegistryPath(); auto userRegistry = readRegistry(userRegistryPath); userRegistry->entries.erase(aliasRef); - userRegistry->entries.insert_or_assign(aliasRef, FlakeRef(uri)); + userRegistry->entries.insert_or_assign(aliasRef, FlakeRef(url)); writeRegistry(*userRegistry, userRegistryPath); } }; diff --git a/tests/flakes.sh b/tests/flakes.sh index a91a0c37c..c5e19826c 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -89,19 +89,19 @@ cat > $registry < $flake3Dir/flake.nix < $flake3Dir/flake.nix < Date: Tue, 8 Oct 2019 16:44:09 +0200 Subject: [PATCH 290/634] Move addRegistrOverrides --- src/nix/build.cc | 2 -- src/nix/flake.cc | 1 - src/nix/installables.cc | 4 +++- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/nix/build.cc b/src/nix/build.cc index f63150012..4fd1de026 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -52,8 +52,6 @@ struct CmdBuild : MixDryRun, MixProfile, InstallablesCommand { auto buildables = build(store, dryRun ? DryRun : Build, installables); - auto evalState = std::make_shared(searchPath, store); - evalState->addRegistryOverrides(registryOverrides); if (dryRun) return; if (outLink != "") { diff --git a/src/nix/flake.cc b/src/nix/flake.cc index cf4fcf722..e6dc5680f 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -129,7 +129,6 @@ struct CmdFlakeDeps : FlakeCommand void run(nix::ref store) override { auto evalState = getEvalState(); - evalState->addRegistryOverrides(registryOverrides); std::queue todo; todo.push(resolveFlake()); diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 867133653..5611a84ae 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -56,8 +56,10 @@ SourceExprCommand::SourceExprCommand() ref EvalCommand::getEvalState() { - if (!evalState) + if (!evalState) { evalState = std::make_shared(searchPath, getStore()); + evalState->addRegistryOverrides(registryOverrides); + } return ref(evalState); } From d343c03edb8d4d2be7aca4f1a377544b3889b8b3 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 8 Oct 2019 20:05:16 +0200 Subject: [PATCH 291/634] Temporary compatibility hack --- flake.lock | 4 ++-- flake.nix | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/flake.lock b/flake.lock index 9c11ec4fc..05e3a6e25 100644 --- a/flake.lock +++ b/flake.lock @@ -3,8 +3,8 @@ "nixpkgs": { "inputs": {}, "narHash": "sha256-ltGlDPfwicH/u4orj1n4JXgRsA+jvKQsGnekObi0TV4=", - "originalUrl": "nixpkgs/release-19.03", - "url": "github:edolstra/nixpkgs/9a593b575e4044f9aff939b512e7cb1cf1e76a65" + "originalUri": "nixpkgs/release-19.03", + "uri": "github:edolstra/nixpkgs/9a593b575e4044f9aff939b512e7cb1cf1e76a65" } }, "version": 3 diff --git a/flake.nix b/flake.nix index 167064d72..4ec80e9d6 100644 --- a/flake.nix +++ b/flake.nix @@ -3,7 +3,7 @@ edition = 201909; - inputs.nixpkgs.url = "nixpkgs/release-19.03"; + inputs.nixpkgs.uri = "nixpkgs/release-19.03"; outputs = { self, nixpkgs }: From 90df25ef7eb5157868a744906eaf6401430bb93d Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 10 Oct 2019 15:07:50 +0200 Subject: [PATCH 292/634] Fix build --- src/libutil/util.hh | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/libutil/util.hh b/src/libutil/util.hh index 9f8c7092d..814028442 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -122,10 +122,6 @@ void deletePath(const Path & path); void deletePath(const Path & path, unsigned long long & bytesFreed); -/* Create a 
temporary directory. */ -Path createTempDir(const Path & tmpRoot = "", const Path & prefix = "nix", - bool includePid = true, bool useGlobalCounter = true, mode_t mode = 0755); - std::string getUserName(); /* Return $HOME or the user's home directory from /etc/passwd. */ From 0bc8f1669d542ef65fbfa80ea3728f4dd36d63f2 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 14 Oct 2019 14:40:16 +0200 Subject: [PATCH 293/634] Move code around --- src/nix/command.hh | 49 +------- src/nix/installables.cc | 259 ++++++++++++++++++---------------------- src/nix/installables.hh | 97 +++++++++++++++ 3 files changed, 216 insertions(+), 189 deletions(-) create mode 100644 src/nix/installables.hh diff --git a/src/nix/command.hh b/src/nix/command.hh index 92f606bbe..802dd9828 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -1,15 +1,15 @@ #pragma once +#include "installables.hh" #include "args.hh" #include "common-eval-args.hh" + #include namespace nix { extern std::string programPath; -struct Value; -class Bindings; class EvalState; class Store; @@ -30,51 +30,6 @@ private: std::shared_ptr _store; }; -struct Buildable -{ - Path drvPath; // may be empty - std::map outputs; -}; - -typedef std::vector Buildables; - -struct App -{ - PathSet context; - Path program; - // FIXME: add args, sandbox settings, metadata, ... - - App(EvalState & state, Value & vApp); -}; - -struct Installable -{ - virtual ~Installable() { } - - virtual std::string what() = 0; - - virtual Buildables toBuildables() - { - throw Error("argument '%s' cannot be built", what()); - } - - Buildable toBuildable(); - - App toApp(EvalState & state); - - virtual Value * toValue(EvalState & state) - { - throw Error("argument '%s' cannot be evaluated", what()); - } - - /* Return a value only if this installable is a store path or a - symlink to it. 
*/ - virtual std::optional getStorePath() - { - return {}; - } -}; - struct EvalCommand : virtual StoreCommand, MixEvalArgs { ref getEvalState(); diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 5611a84ae..93509955a 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -1,3 +1,4 @@ +#include "installables.hh" #include "command.hh" #include "attr-path.hh" #include "common-eval-args.hh" @@ -111,65 +112,58 @@ struct InstallableStorePath : Installable } }; -struct InstallableValue : Installable +std::vector InstallableValue::toDerivations() { - SourceExprCommand & cmd; + auto state = cmd.getEvalState(); - InstallableValue(SourceExprCommand & cmd) : cmd(cmd) { } + auto v = toValue(*state); - virtual std::vector toDerivations() - { - auto state = cmd.getEvalState(); + Bindings & autoArgs = *cmd.getAutoArgs(*state); - auto v = toValue(*state); + DrvInfos drvInfos; + getDerivations(*state, *v, "", autoArgs, drvInfos, false); - Bindings & autoArgs = *cmd.getAutoArgs(*state); + std::vector res; + for (auto & drvInfo : drvInfos) { + res.push_back({ + drvInfo.queryDrvPath(), + drvInfo.queryOutPath(), + drvInfo.queryOutputName() + }); + } - DrvInfos drvInfos; - getDerivations(*state, *v, "", autoArgs, drvInfos, false); + return res; +} - std::vector res; - for (auto & drvInfo : drvInfos) { - res.push_back({ - drvInfo.queryDrvPath(), - drvInfo.queryOutPath(), - drvInfo.queryOutputName() - }); - } +Buildables InstallableValue::toBuildables() +{ + Buildables res; + PathSet drvPaths; + + for (auto & drv : toDerivations()) { + Buildable b{drv.drvPath}; + drvPaths.insert(b.drvPath); + + auto outputName = drv.outputName; + if (outputName == "") + throw Error("derivation '%s' lacks an 'outputName' attribute", b.drvPath); + + b.outputs.emplace(outputName, drv.outPath); + + res.push_back(std::move(b)); + } + + // Hack to recognize .all: if all drvs have the same drvPath, + // merge the buildables. + if (drvPaths.size() == 1) { + Buildable b{*drvPaths.begin()}; + for (auto & b2 : res) + b.outputs.insert(b2.outputs.begin(), b2.outputs.end()); + return {b}; + } else return res; - } - - Buildables toBuildables() override - { - Buildables res; - - PathSet drvPaths; - - for (auto & drv : toDerivations()) { - Buildable b{drv.drvPath}; - drvPaths.insert(b.drvPath); - - auto outputName = drv.outputName; - if (outputName == "") - throw Error("derivation '%s' lacks an 'outputName' attribute", b.drvPath); - - b.outputs.emplace(outputName, drv.outPath); - - res.push_back(std::move(b)); - } - - // Hack to recognize .all: if all drvs have the same drvPath, - // merge the buildables. 
- if (drvPaths.size() == 1) { - Buildable b{*drvPaths.begin()}; - for (auto & b2 : res) - b.outputs.insert(b2.outputs.begin(), b2.outputs.end()); - return {b}; - } else - return res; - } -}; +} struct InstallableExpr : InstallableValue { @@ -254,123 +248,104 @@ void makeFlakeClosureGCRoot(Store & store, store.addIndirectRoot(symlink); } -struct InstallableFlake : InstallableValue +std::vector InstallableFlake::getActualAttrPaths() { - FlakeRef flakeRef; - Strings attrPaths; - Strings prefixes; + std::vector res; - InstallableFlake(SourceExprCommand & cmd, FlakeRef && flakeRef, Strings attrPaths) - : InstallableValue(cmd), flakeRef(flakeRef), attrPaths(std::move(attrPaths)) - { } + for (auto & prefix : prefixes) + res.push_back(prefix + *attrPaths.begin()); - InstallableFlake(SourceExprCommand & cmd, FlakeRef && flakeRef, - std::string attrPath, Strings && prefixes) - : InstallableValue(cmd), flakeRef(flakeRef), attrPaths{attrPath}, - prefixes(prefixes) - { } + for (auto & s : attrPaths) + res.push_back(s); - std::string what() override { return flakeRef.to_string() + ":" + *attrPaths.begin(); } + return res; +} - std::vector getActualAttrPaths() - { - std::vector res; +Value * InstallableFlake::getFlakeOutputs(EvalState & state, const flake::ResolvedFlake & resFlake) +{ + auto vFlake = state.allocValue(); - for (auto & prefix : prefixes) - res.push_back(prefix + *attrPaths.begin()); + callFlake(state, resFlake, *vFlake); - for (auto & s : attrPaths) - res.push_back(s); + makeFlakeClosureGCRoot(*state.store, flakeRef, resFlake); - return res; - } + auto aOutputs = vFlake->attrs->get(state.symbols.create("outputs")); + assert(aOutputs); - Value * getFlakeOutputs(EvalState & state, const flake::ResolvedFlake & resFlake) - { - auto vFlake = state.allocValue(); + state.forceValue(*(*aOutputs)->value); - callFlake(state, resFlake, *vFlake); + return (*aOutputs)->value; +} - makeFlakeClosureGCRoot(*state.store, flakeRef, resFlake); +std::vector InstallableFlake::toDerivations() +{ + auto state = cmd.getEvalState(); - auto aOutputs = vFlake->attrs->get(state.symbols.create("outputs")); - assert(aOutputs); + auto resFlake = resolveFlake(*state, flakeRef, cmd.getLockFileMode()); - state.forceValue(*(*aOutputs)->value); + Value * vOutputs = nullptr; - return (*aOutputs)->value; - } + auto emptyArgs = state->allocBindings(0); - std::vector toDerivations() override - { - auto state = cmd.getEvalState(); + auto & evalCache = flake::EvalCache::singleton(); - auto resFlake = resolveFlake(*state, flakeRef, cmd.getLockFileMode()); + auto fingerprint = resFlake.getFingerprint(); - Value * vOutputs = nullptr; - - auto emptyArgs = state->allocBindings(0); - - auto & evalCache = flake::EvalCache::singleton(); - - auto fingerprint = resFlake.getFingerprint(); - - for (auto & attrPath : getActualAttrPaths()) { - auto drv = evalCache.getDerivation(fingerprint, attrPath); - if (drv) { - if (state->store->isValidPath(drv->drvPath)) - return {*drv}; - } - - if (!vOutputs) - vOutputs = getFlakeOutputs(*state, resFlake); - - try { - auto * v = findAlongAttrPath(*state, attrPath, *emptyArgs, *vOutputs); - state->forceValue(*v); - - auto drvInfo = getDerivation(*state, *v, false); - if (!drvInfo) - throw Error("flake output attribute '%s' is not a derivation", attrPath); - - auto drv = flake::EvalCache::Derivation{ - drvInfo->queryDrvPath(), - drvInfo->queryOutPath(), - drvInfo->queryOutputName() - }; - - evalCache.addDerivation(fingerprint, attrPath, drv); - - return {drv}; - } catch (AttrPathNotFound & e) { - } + for 
(auto & attrPath : getActualAttrPaths()) { + auto drv = evalCache.getDerivation(fingerprint, attrPath); + if (drv) { + if (state->store->isValidPath(drv->drvPath)) + return {*drv}; } - throw Error("flake '%s' does not provide attribute %s", - flakeRef, concatStringsSep(", ", quoteStrings(attrPaths))); - } + if (!vOutputs) + vOutputs = getFlakeOutputs(*state, resFlake); - Value * toValue(EvalState & state) override - { - auto resFlake = resolveFlake(state, flakeRef, cmd.getLockFileMode()); + try { + auto * v = findAlongAttrPath(*state, attrPath, *emptyArgs, *vOutputs); + state->forceValue(*v); - auto vOutputs = getFlakeOutputs(state, resFlake); + auto drvInfo = getDerivation(*state, *v, false); + if (!drvInfo) + throw Error("flake output attribute '%s' is not a derivation", attrPath); - auto emptyArgs = state.allocBindings(0); + auto drv = flake::EvalCache::Derivation{ + drvInfo->queryDrvPath(), + drvInfo->queryOutPath(), + drvInfo->queryOutputName() + }; - for (auto & attrPath : getActualAttrPaths()) { - try { - auto * v = findAlongAttrPath(state, attrPath, *emptyArgs, *vOutputs); - state.forceValue(*v); - return v; - } catch (AttrPathNotFound & e) { - } + evalCache.addDerivation(fingerprint, attrPath, drv); + + return {drv}; + } catch (AttrPathNotFound & e) { } - - throw Error("flake '%s' does not provide attribute %s", - flakeRef, concatStringsSep(", ", quoteStrings(attrPaths))); } -}; + + throw Error("flake '%s' does not provide attribute %s", + flakeRef, concatStringsSep(", ", quoteStrings(attrPaths))); +} + +Value * InstallableFlake::toValue(EvalState & state) +{ + auto resFlake = resolveFlake(state, flakeRef, cmd.getLockFileMode()); + + auto vOutputs = getFlakeOutputs(state, resFlake); + + auto emptyArgs = state.allocBindings(0); + + for (auto & attrPath : getActualAttrPaths()) { + try { + auto * v = findAlongAttrPath(state, attrPath, *emptyArgs, *vOutputs); + state.forceValue(*v); + return v; + } catch (AttrPathNotFound & e) { + } + } + + throw Error("flake '%s' does not provide attribute %s", + flakeRef, concatStringsSep(", ", quoteStrings(attrPaths))); +} // FIXME: extend std::string attrRegex = R"([A-Za-z_][A-Za-z0-9-_+]*)"; diff --git a/src/nix/installables.hh b/src/nix/installables.hh new file mode 100644 index 000000000..020a61a2b --- /dev/null +++ b/src/nix/installables.hh @@ -0,0 +1,97 @@ +#pragma once + +#include "util.hh" +#include "flake/eval-cache.hh" + +#include + +namespace nix { + +struct Value; +class EvalState; +class SourceExprCommand; + +struct Buildable +{ + Path drvPath; // may be empty + std::map outputs; +}; + +typedef std::vector Buildables; + +struct App +{ + PathSet context; + Path program; + // FIXME: add args, sandbox settings, metadata, ... + + App(EvalState & state, Value & vApp); +}; + +struct Installable +{ + virtual ~Installable() { } + + virtual std::string what() = 0; + + virtual Buildables toBuildables() + { + throw Error("argument '%s' cannot be built", what()); + } + + Buildable toBuildable(); + + App toApp(EvalState & state); + + virtual Value * toValue(EvalState & state) + { + throw Error("argument '%s' cannot be evaluated", what()); + } + + /* Return a value only if this installable is a store path or a + symlink to it. 
*/ + virtual std::optional getStorePath() + { + return {}; + } +}; + +struct InstallableValue : Installable +{ + SourceExprCommand & cmd; + + InstallableValue(SourceExprCommand & cmd) : cmd(cmd) { } + + virtual std::vector toDerivations(); + + Buildables toBuildables() override; +}; + +struct InstallableFlake : InstallableValue +{ + FlakeRef flakeRef; + Strings attrPaths; + Strings prefixes; + + InstallableFlake(SourceExprCommand & cmd, FlakeRef && flakeRef, Strings attrPaths) + : InstallableValue(cmd), flakeRef(flakeRef), attrPaths(std::move(attrPaths)) + { } + + InstallableFlake(SourceExprCommand & cmd, FlakeRef && flakeRef, + std::string attrPath, Strings && prefixes) + : InstallableValue(cmd), flakeRef(flakeRef), attrPaths{attrPath}, + prefixes(prefixes) + { } + + std::string what() override { return flakeRef.to_string() + ":" + *attrPaths.begin(); } + + std::vector getActualAttrPaths(); + + Value * getFlakeOutputs(EvalState & state, const flake::ResolvedFlake & resFlake); + + std::vector toDerivations() override; + + Value * toValue(EvalState & state) override; +}; + +} From 7d38060a0da2698052e84a0cfee422d409a38187 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 15 Oct 2019 17:52:10 +0200 Subject: [PATCH 294/634] Support non-x86_64-linux system types in flakes A command like $ nix run nixpkgs#hello will now build the attribute 'packages.${system}.hello' rather than 'packages.hello'. Note that this does mean that the flake needs to export an attribute for every system type it supports, and you can't build on unsupported systems. So 'packages' typically looks like this: packages = nixpkgs.lib.genAttrs ["x86_64-linux" "i686-linux"] (system: { hello = ...; }); The 'checks', 'defaultPackage', 'devShell', 'apps' and 'defaultApp' outputs similarly are now attrsets that map system types to derivations/apps. 'nix flake check' checks that the derivations for all platforms evaluate correctly, but only builds the derivations in 'checks.${system}'. Fixes #2861. (That issue also talks about access to ~/.config/nixpkgs and --arg, but I think it's reasonable to say that flakes shouldn't support those.) The alternative to attribute selection is to pass the system type as an argument to the flake's 'outputs' function, e.g. 'outputs = { self, nixpkgs, system }: ...'. However, that approach would be at odds with hermetic evaluation and make it impossible to enumerate the packages provided by a flake. --- flake.nix | 26 +++++++------- src/nix/command.hh | 17 ++------- src/nix/flake.cc | 80 +++++++++++++++++++++++++++++++---------- src/nix/installables.cc | 17 +++++++++ src/nix/run.cc | 4 +-- src/nix/shell.cc | 2 +- tests/flakes.sh | 24 ++++++------- 7 files changed, 109 insertions(+), 61 deletions(-) diff --git a/flake.nix b/flake.nix index 4ec80e9d6..2f0073cc8 100644 --- a/flake.nix +++ b/flake.nix @@ -13,8 +13,10 @@ systems = [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ]; + forAllSystems = f: nixpkgs.lib.genAttrs systems (system: f system); + # Memoize nixpkgs for different platforms for efficiency. 
- nixpkgsFor = nixpkgs.lib.genAttrs systems (system: + nixpkgsFor = forAllSystems (system: import nixpkgs { inherit system; overlays = [ self.overlay ]; @@ -427,19 +429,19 @@ }; - checks = { - binaryTarball = self.hydraJobs.binaryTarball.x86_64-linux; - perlBindings = self.hydraJobs.perlBindings.x86_64-linux; - }; + checks = forAllSystems (system: { + binaryTarball = self.hydraJobs.binaryTarball.${system}; + perlBindings = self.hydraJobs.perlBindings.${system}; + }); - packages = { - inherit (nixpkgsFor.x86_64-linux) nix; - }; + packages = forAllSystems (system: { + inherit (nixpkgsFor.${system}) nix; + }); - defaultPackage = self.packages.nix; + defaultPackage = forAllSystems (system: self.packages.${system}.nix); - devShell = - with nixpkgsFor.x86_64-linux; + devShell = forAllSystems (system: + with nixpkgsFor.${system}; with commonDeps pkgs; stdenv.mkDerivation { @@ -461,7 +463,7 @@ PATH=$prefix/bin:$PATH unset PYTHONPATH ''; - }; + }); }; } diff --git a/src/nix/command.hh b/src/nix/command.hh index 802dd9828..93f324071 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -64,22 +64,9 @@ struct SourceExprCommand : virtual Args, EvalCommand, MixFlakeOptions std::shared_ptr parseInstallable( ref store, const std::string & installable); - virtual Strings getDefaultFlakeAttrPaths() - { - return {"defaultPackage"}; - } + virtual Strings getDefaultFlakeAttrPaths(); - virtual Strings getDefaultFlakeAttrPathPrefixes() - { - return { - // As a convenience, look for the attribute in - // 'outputs.packages'. - "packages.", - // As a temporary hack until Nixpkgs is properly converted - // to provide a clean 'packages' set, look in 'legacyPackages'. - "legacyPackages." - }; - } + virtual Strings getDefaultFlakeAttrPathPrefixes(); }; enum RealiseMode { Build, NoBuild, DryRun }; diff --git a/src/nix/flake.cc b/src/nix/flake.cc index e6dc5680f..d928af3b9 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -251,6 +251,12 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON auto state = getEvalState(); auto flake = resolveFlake(); + auto checkSystemName = [&](const std::string & system, const Pos & pos) { + // FIXME: what's the format of "system"? + if (system.find('-') == std::string::npos) + throw Error("'%s' is not a valid system type, at %s", system, pos); + }; + auto checkDerivation = [&](const std::string & attrPath, Value & v, const Pos & pos) { try { auto drvInfo = getDerivation(*state, v, false); @@ -374,34 +380,70 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON if (name == "checks") { state->forceAttrs(vOutput, pos); - for (auto & attr : *vOutput.attrs) - drvPaths.insert(checkDerivation( - name + "." + (std::string) attr.name, *attr.value, *attr.pos)); + for (auto & attr : *vOutput.attrs) { + checkSystemName(attr.name, *attr.pos); + state->forceAttrs(*attr.value, *attr.pos); + for (auto & attr2 : *attr.value->attrs) { + auto drvPath = checkDerivation( + fmt("%s.%s.%s", name, attr.name, attr2.name), + *attr2.value, *attr2.pos); + if ((std::string) attr.name == settings.thisSystem.get()) + drvPaths.insert(drvPath); + } + } } else if (name == "packages") { state->forceAttrs(vOutput, pos); - for (auto & attr : *vOutput.attrs) - checkDerivation( - name + "." 
+ (std::string) attr.name, *attr.value, *attr.pos); + for (auto & attr : *vOutput.attrs) { + checkSystemName(attr.name, *attr.pos); + state->forceAttrs(*attr.value, *attr.pos); + for (auto & attr2 : *attr.value->attrs) + checkDerivation( + fmt("%s.%s.%s", name, attr.name, attr2.name), + *attr2.value, *attr2.pos); + } } else if (name == "apps") { state->forceAttrs(vOutput, pos); - for (auto & attr : *vOutput.attrs) - checkApp( - name + "." + (std::string) attr.name, *attr.value, *attr.pos); + for (auto & attr : *vOutput.attrs) { + checkSystemName(attr.name, *attr.pos); + state->forceAttrs(*attr.value, *attr.pos); + for (auto & attr2 : *attr.value->attrs) + checkApp( + fmt("%s.%s.%s", name, attr.name, attr2.name), + *attr2.value, *attr2.pos); + } } - else if (name == "defaultPackage" || name == "devShell") - checkDerivation(name, vOutput, pos); + else if (name == "defaultPackage" || name == "devShell") { + state->forceAttrs(vOutput, pos); + for (auto & attr : *vOutput.attrs) { + checkSystemName(attr.name, *attr.pos); + checkDerivation( + fmt("%s.%s", name, attr.name), + *attr.value, *attr.pos); + } + } - else if (name == "defaultApp") - checkApp(name, vOutput, pos); + else if (name == "defaultApp") { + state->forceAttrs(vOutput, pos); + for (auto & attr : *vOutput.attrs) { + checkSystemName(attr.name, *attr.pos); + checkApp( + fmt("%s.%s", name, attr.name), + *attr.value, *attr.pos); + } + } - else if (name == "legacyPackages") - // FIXME: do getDerivations? - ; + else if (name == "legacyPackages") { + state->forceAttrs(vOutput, pos); + for (auto & attr : *vOutput.attrs) { + checkSystemName(attr.name, *attr.pos); + // FIXME: do getDerivations? + } + } else if (name == "overlay") checkOverlay(name, vOutput, pos); @@ -409,7 +451,7 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON else if (name == "overlays") { state->forceAttrs(vOutput, pos); for (auto & attr : *vOutput.attrs) - checkOverlay(name + "." + (std::string) attr.name, + checkOverlay(fmt("%s.%s", name, attr.name), *attr.value, *attr.pos); } @@ -419,14 +461,14 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON else if (name == "nixosModules") { state->forceAttrs(vOutput, pos); for (auto & attr : *vOutput.attrs) - checkModule(name + "." + (std::string) attr.name, + checkModule(fmt("%s.%s", name, attr.name), *attr.value, *attr.pos); } else if (name == "nixosConfigurations") { state->forceAttrs(vOutput, pos); for (auto & attr : *vOutput.attrs) - checkNixOSConfiguration(name + "." + (std::string) attr.name, + checkNixOSConfiguration(fmt("%s.%s", name, attr.name), *attr.value, *attr.pos); } diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 93509955a..bc8fbeb8f 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -55,6 +55,23 @@ SourceExprCommand::SourceExprCommand() .dest(&file); } +Strings SourceExprCommand::getDefaultFlakeAttrPaths() +{ + return {"defaultPackage." + settings.thisSystem.get()}; +} + +Strings SourceExprCommand::getDefaultFlakeAttrPathPrefixes() +{ + return { + // As a convenience, look for the attribute in + // 'outputs.packages'. + "packages." + settings.thisSystem.get() + ".", + // As a temporary hack until Nixpkgs is properly converted + // to provide a clean 'packages' set, look in 'legacyPackages'. + "legacyPackages." + settings.thisSystem.get() + "." 
+ }; +} + ref EvalCommand::getEvalState() { if (!evalState) { diff --git a/src/nix/run.cc b/src/nix/run.cc index 01ec9a6f8..d444fd2eb 100644 --- a/src/nix/run.cc +++ b/src/nix/run.cc @@ -215,12 +215,12 @@ struct CmdApp : InstallableCommand, RunCommon Strings getDefaultFlakeAttrPaths() override { - return {"defaultApp"}; + return {"defaultApp." + settings.thisSystem.get()}; } Strings getDefaultFlakeAttrPathPrefixes() override { - return {"apps."}; + return {"apps." + settings.thisSystem.get() + "."}; } void run(ref store) override diff --git a/src/nix/shell.cc b/src/nix/shell.cc index a4488b229..50d0f9c88 100644 --- a/src/nix/shell.cc +++ b/src/nix/shell.cc @@ -198,7 +198,7 @@ struct Common : InstallableCommand, MixProfile Strings getDefaultFlakeAttrPaths() override { - return {"devShell", "defaultPackage"}; + return {"devShell." + settings.thisSystem.get(), "defaultPackage." + settings.thisSystem.get()}; } Path getShellOutPath(ref store) diff --git a/tests/flakes.sh b/tests/flakes.sh index c5e19826c..73f9d2685 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -34,8 +34,8 @@ cat > $flake1Dir/flake.nix < $flake2Dir/flake.nix < $flake3Dir/flake.nix < $flake3Dir/flake.nix < $flake3Dir/flake.nix < $flake3Dir/flake.nix < Date: Tue, 15 Oct 2019 19:53:29 +0200 Subject: [PATCH 295/634] Fix 'nix flake init' --- src/nix/flake-template.nix | 10 +++------- tests/flakes.sh | 7 ++++++- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/nix/flake-template.nix b/src/nix/flake-template.nix index bec613f6c..eb8eb14fc 100644 --- a/src/nix/flake-template.nix +++ b/src/nix/flake-template.nix @@ -1,15 +1,11 @@ { - name = "hello"; - description = "A flake for building Hello World"; - epoch = 201906; + edition = 201909; - requires = [ "nixpkgs" ]; + outputs = { self, nixpkgs }: { - provides = deps: rec { - - packages.hello = deps.nixpkgs.provides.packages.hello; + packages.x86_64-linux.hello = nixpkgs.packages.x86_64-linux.hello; }; } diff --git a/tests/flakes.sh b/tests/flakes.sh index 73f9d2685..6cae8029c 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -36,6 +36,9 @@ cat > $flake1Dir/flake.nix < Date: Wed, 16 Oct 2019 00:12:40 +0200 Subject: [PATCH 296/634] Improve GitHub caching In particular, when building a flake lock file, inputs like 'nixpkgs' are now downloaded only once. Previously, it would fetch https://api.github.com/repos///tarball/ and then later https://api.github.com/repos///tarball/, even though they produce the same result. Git and GitHub now also share a cache that maps revs to a store path and other info. --- src/libexpr/flake/flake.cc | 37 +------- src/libexpr/primops/fetchGit.cc | 157 +++++++++++++++++++++++++------- src/libexpr/primops/fetchGit.hh | 15 ++- 3 files changed, 136 insertions(+), 73 deletions(-) diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index e8eb353fb..5fb40fabd 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -170,43 +170,10 @@ static SourceInfo fetchInput(EvalState & state, const FlakeRef & resolvedRef) // This only downloads only one revision of the repo, not the entire history. if (auto refData = std::get_if(&resolvedRef.data)) { - - // FIXME: use regular /archive URLs instead? api.github.com - // might have stricter rate limits. - - auto url = fmt("https://api.github.com/repos/%s/%s/tarball/%s", - refData->owner, refData->repo, - resolvedRef.rev ? resolvedRef.rev->to_string(Base16, false) - : resolvedRef.ref ? 
*resolvedRef.ref : "master"); - - std::string accessToken = settings.githubAccessToken.get(); - if (accessToken != "") - url += "?access_token=" + accessToken; - - CachedDownloadRequest request(url); - request.unpack = true; - request.name = "source"; - request.ttl = resolvedRef.rev ? 1000000000 : settings.tarballTtl; - request.getLastModified = true; - auto result = getDownloader()->downloadCached(state.store, request); - - if (!result.etag) - throw Error("did not receive an ETag header from '%s'", url); - - if (result.etag->size() != 42 || (*result.etag)[0] != '"' || (*result.etag)[41] != '"') - throw Error("ETag header '%s' from '%s' is not a Git revision", *result.etag, url); - - FlakeRef ref(resolvedRef.baseRef()); - ref.rev = Hash(std::string(*result.etag, 1, result.etag->size() - 2), htSHA1); - SourceInfo info(ref); - info.storePath = result.storePath; - info.narHash = state.store->queryPathInfo(info.storePath)->narHash; - info.lastModified = result.lastModified; - - return info; + return doGit(exportGitHub(state.store, refData->owner, refData->repo, resolvedRef.ref, resolvedRef.rev)); } - // This downloads the entire git history + // This downloads the entire git history. else if (auto refData = std::get_if(&resolvedRef.data)) { return doGit(exportGit(state.store, refData->uri, resolvedRef.ref, resolvedRef.rev, "source")); } diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index 21fa025c1..50277672c 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -18,6 +18,60 @@ namespace nix { extern std::regex revRegex; +static Path getCacheInfoPathFor(const std::string & name, const Hash & rev) +{ + Path cacheDir = getCacheDir() + "/nix/git-revs"; + std::string linkName = + name == "source" + ? rev.gitRev() + : hashString(htSHA512, name + std::string("\0"s) + rev.gitRev()).to_string(Base32, false); + return cacheDir + "/" + linkName + ".link"; +} + +static void cacheGitInfo(const std::string & name, const GitInfo & gitInfo) +{ + nlohmann::json json; + json["storePath"] = gitInfo.storePath; + json["name"] = name; + json["rev"] = gitInfo.rev.gitRev(); + if (gitInfo.revCount) + json["revCount"] = *gitInfo.revCount; + json["lastModified"] = gitInfo.lastModified; + + auto cacheInfoPath = getCacheInfoPathFor(name, gitInfo.rev); + createDirs(dirOf(cacheInfoPath)); + writeFile(cacheInfoPath, json.dump()); +} + +static std::optional lookupGitInfo( + ref store, + const std::string & name, + const Hash & rev) +{ + try { + auto json = nlohmann::json::parse(readFile(getCacheInfoPathFor(name, rev))); + + assert(json["name"] == name && Hash((std::string) json["rev"], htSHA1) == rev); + + Path storePath = json["storePath"]; + + if (store->isValidPath(storePath)) { + GitInfo gitInfo; + gitInfo.storePath = storePath; + gitInfo.rev = rev; + if (json.find("revCount") != json.end()) + gitInfo.revCount = json["revCount"]; + gitInfo.lastModified = json["lastModified"]; + return gitInfo; + } + + } catch (SysError & e) { + if (e.errNo != ENOENT) throw; + } + + return {}; +} + GitInfo exportGit(ref store, std::string uri, std::optional ref, std::optional rev, @@ -25,6 +79,17 @@ GitInfo exportGit(ref store, std::string uri, { assert(!rev || rev->type == htSHA1); + if (rev) { + if (auto gitInfo = lookupGitInfo(store, name, *rev)) { + // If this gitInfo was produced by exportGitHub, then it won't + // have a revCount. So we have to do a full clone. 
+ if (gitInfo->revCount) { + gitInfo->ref = ref; + return *gitInfo; + } + } + } + if (hasPrefix(uri, "git+")) uri = std::string(uri, 4); bool isLocal = hasPrefix(uri, "/") && pathExists(uri + "/.git"); @@ -100,9 +165,6 @@ GitInfo exportGit(ref store, std::string uri, isLocal = true; } - deletePath(getCacheDir() + "/nix/git"); - deletePath(getCacheDir() + "/nix/gitv2"); - Path cacheDir = getCacheDir() + "/nix/gitv3/" + hashString(htSHA256, uri).to_string(Base32, false); Path repoDir; @@ -179,6 +241,13 @@ GitInfo exportGit(ref store, std::string uri, rev = Hash(chomp(readFile(localRefFile)), htSHA1); } + if (auto gitInfo = lookupGitInfo(store, name, *rev)) { + if (gitInfo->revCount) { + gitInfo->ref = ref; + return *gitInfo; + } + } + // FIXME: check whether rev is an ancestor of ref. GitInfo gitInfo; gitInfo.ref = *ref; @@ -186,29 +255,6 @@ GitInfo exportGit(ref store, std::string uri, printTalkative("using revision %s of repo '%s'", gitInfo.rev, uri); - std::string storeLinkName = hashString(htSHA512, - name + std::string("\0"s) + gitInfo.rev.gitRev()).to_string(Base32, false); - Path storeLink = cacheDir + "/" + storeLinkName + ".link"; - PathLocks storeLinkLock({storeLink}, fmt("waiting for lock on '%1%'...", storeLink)); // FIXME: broken - - try { - auto json = nlohmann::json::parse(readFile(storeLink)); - - assert(json["name"] == name && Hash((std::string) json["rev"], htSHA1) == gitInfo.rev); - - Path storePath = json["storePath"]; - - if (store->isValidPath(storePath)) { - gitInfo.storePath = storePath; - gitInfo.revCount = json["revCount"]; - gitInfo.lastModified = json["lastModified"]; - return gitInfo; - } - - } catch (SysError & e) { - if (e.errNo != ENOENT) throw; - } - // FIXME: should pipe this, or find some better way to extract a // revision. auto tar = runProgram("git", true, { "-C", repoDir, "archive", gitInfo.rev.gitRev() }); @@ -223,15 +269,55 @@ GitInfo exportGit(ref store, std::string uri, gitInfo.revCount = std::stoull(runProgram("git", true, { "-C", repoDir, "rev-list", "--count", gitInfo.rev.gitRev() })); gitInfo.lastModified = std::stoull(runProgram("git", true, { "-C", repoDir, "show", "-s", "--format=%ct", gitInfo.rev.gitRev() })); - nlohmann::json json; - json["storePath"] = gitInfo.storePath; - json["uri"] = uri; - json["name"] = name; - json["rev"] = gitInfo.rev.gitRev(); - json["revCount"] = gitInfo.revCount; - json["lastModified"] = gitInfo.lastModified; + cacheGitInfo(name, gitInfo); - writeFile(storeLink, json.dump()); + return gitInfo; +} + +GitInfo exportGitHub( + ref store, + const std::string & owner, + const std::string & repo, + std::optional ref, + std::optional rev) +{ + if (rev) { + if (auto gitInfo = lookupGitInfo(store, "source", *rev)) + return *gitInfo; + } + + // FIXME: use regular /archive URLs instead? api.github.com + // might have stricter rate limits. + + auto url = fmt("https://api.github.com/repos/%s/%s/tarball/%s", + owner, repo, rev ? rev->to_string(Base16, false) : ref ? *ref : "master"); + + std::string accessToken = settings.githubAccessToken.get(); + if (accessToken != "") + url += "?access_token=" + accessToken; + + CachedDownloadRequest request(url); + request.unpack = true; + request.name = "source"; + request.ttl = rev ? 
1000000000 : settings.tarballTtl; + request.getLastModified = true; + auto result = getDownloader()->downloadCached(store, request); + + if (!result.etag) + throw Error("did not receive an ETag header from '%s'", url); + + if (result.etag->size() != 42 || (*result.etag)[0] != '"' || (*result.etag)[41] != '"') + throw Error("ETag header '%s' from '%s' is not a Git revision", *result.etag, url); + + assert(result.lastModified); + + GitInfo gitInfo; + gitInfo.storePath = result.storePath; + gitInfo.rev = Hash(std::string(*result.etag, 1, result.etag->size() - 2), htSHA1); + gitInfo.lastModified = *result.lastModified; + + // FIXME: this can overwrite a cache file that contains a revCount. + cacheGitInfo("source", gitInfo); return gitInfo; } @@ -283,7 +369,8 @@ static void prim_fetchGit(EvalState & state, const Pos & pos, Value * * args, Va mkString(*state.allocAttr(v, state.sOutPath), gitInfo.storePath, PathSet({gitInfo.storePath})); mkString(*state.allocAttr(v, state.symbols.create("rev")), gitInfo.rev.gitRev()); mkString(*state.allocAttr(v, state.symbols.create("shortRev")), gitInfo.rev.gitShortRev()); - mkInt(*state.allocAttr(v, state.symbols.create("revCount")), gitInfo.revCount); + assert(gitInfo.revCount); + mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *gitInfo.revCount); v.attrs->sort(); if (state.allowedPaths) diff --git a/src/libexpr/primops/fetchGit.hh b/src/libexpr/primops/fetchGit.hh index 006fa8b5f..fe2b49942 100644 --- a/src/libexpr/primops/fetchGit.hh +++ b/src/libexpr/primops/fetchGit.hh @@ -9,15 +9,24 @@ namespace nix { struct GitInfo { Path storePath; - std::string ref; + std::optional ref; Hash rev{htSHA1}; - uint64_t revCount; + std::optional revCount; time_t lastModified; }; -GitInfo exportGit(ref store, std::string uri, +GitInfo exportGit( + ref store, + std::string uri, std::optional ref, std::optional rev, const std::string & name); +GitInfo exportGitHub( + ref store, + const std::string & owner, + const std::string & repo, + std::optional ref, + std::optional rev); + } From a56036fa872d86b09586c41ead475d537b6df0a3 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 16 Oct 2019 00:20:18 +0200 Subject: [PATCH 297/634] Fix repeated fetchGit.sh test --- tests/fetchGit.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fetchGit.sh b/tests/fetchGit.sh index be46d24a7..885dd9e77 100644 --- a/tests/fetchGit.sh +++ b/tests/fetchGit.sh @@ -11,7 +11,7 @@ repo=$TEST_ROOT/git export _NIX_FORCE_HTTP=1 -rm -rf $repo ${repo}-tmp $TEST_HOME/.cache/nix/gitv2 +rm -rf $repo ${repo}-tmp $TEST_HOME/.cache/nix/gitv* git init $repo git -C $repo config user.email "foobar@example.com" From 8e478c234100cf03ea1b777d4bd42a9be7be9e8c Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 16 Oct 2019 17:45:09 +0200 Subject: [PATCH 298/634] Add experimental-features setting Experimental features are now opt-in. There are currently two experimental features: "nix-command" (which enables the "nix" command), and "flakes" (which enables support for flakes). This will allow us to merge experimental features more quickly, without committing to supporting them indefinitely. 
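The setting can also be placed in nix.conf to enable the features persistently; the tests/init.sh hunk in this patch does exactly that for the test suite:

    experimental-features = nix-command flakes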
Typical usage: $ nix build --experimental-features 'nix-command flakes' nixpkgs#hello --- src/libexpr/flake/flake.cc | 2 ++ src/libstore/globals.cc | 7 +++++++ src/libstore/globals.hh | 5 +++++ src/nix/main.cc | 2 ++ tests/init.sh | 1 + 5 files changed, 17 insertions(+) diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index 5fb40fabd..d03227f69 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -433,6 +433,8 @@ static std::pair updateLocks( and optionally write it to file, it the flake is writable. */ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLockFile handleLockFile) { + settings.requireExperimentalFeature("flakes"); + auto flake = getFlake(state, topRef, allowedToUseRegistries(handleLockFile, true)); diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 1c2c08715..249c36673 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -105,6 +105,13 @@ StringSet Settings::getDefaultSystemFeatures() return features; } +void Settings::requireExperimentalFeature(const std::string & name) +{ + auto & f = experimentalFeatures.get(); + if (std::find(f.begin(), f.end(), name) == f.end()) + throw Error("experimental Nix feature '%s' is disabled", name); +} + const string nixVersion = PACKAGE_VERSION; template<> void BaseSetting::set(const std::string & str) diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index c0c535a12..0b1a8dac5 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -356,6 +356,11 @@ public: Setting githubAccessToken{this, "", "github-acces-token", "GitHub access token to get access to GitHub data through the GitHub API for github:<..> flakes."}; + + Setting experimentalFeatures{this, {}, "experimental-features", + "Experimental Nix features to enable."}; + + void requireExperimentalFeature(const std::string & name); }; diff --git a/src/nix/main.cc b/src/nix/main.cc index eedf8656e..8cdeed8f5 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -149,6 +149,8 @@ void mainWrapped(int argc, char * * argv) args.parseCmdline(argvToStrings(argc, argv)); + settings.requireExperimentalFeature("nix-command"); + initPlugins(); if (!args.command) args.showHelpAndExit(); diff --git a/tests/init.sh b/tests/init.sh index 19a12c1e2..c62c4856a 100644 --- a/tests/init.sh +++ b/tests/init.sh @@ -17,6 +17,7 @@ cat > "$NIX_CONF_DIR"/nix.conf < Date: Sun, 20 Oct 2019 16:43:00 +0200 Subject: [PATCH 299/634] Fix InstallableFlake::what() --- src/nix/installables.hh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nix/installables.hh b/src/nix/installables.hh index 020a61a2b..a635cb96f 100644 --- a/src/nix/installables.hh +++ b/src/nix/installables.hh @@ -83,7 +83,7 @@ struct InstallableFlake : InstallableValue prefixes(prefixes) { } - std::string what() override { return flakeRef.to_string() + ":" + *attrPaths.begin(); } + std::string what() override { return flakeRef.to_string() + "#" + *attrPaths.begin(); } std::vector getActualAttrPaths(); From a07da2fd7a889c225847556c0d4bf88384995274 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 21 Oct 2019 14:57:01 +0200 Subject: [PATCH 300/634] Don't ignore revs/refs of local flakerefs Fixes error: the content hash of flake '/home/eelco/Dev/nixpkgs-flake?ref=HEAD&rev=0000000000000000000000000000000000000000' doesn't match the hash recorded in the referring lockfile --- src/libexpr/flake/flake.cc | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/libexpr/flake/flake.cc 
b/src/libexpr/flake/flake.cc index d03227f69..73ca59452 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -181,7 +181,7 @@ static SourceInfo fetchInput(EvalState & state, const FlakeRef & resolvedRef) else if (auto refData = std::get_if(&resolvedRef.data)) { if (!pathExists(refData->path + "/.git")) throw Error("flake '%s' does not reference a Git repository", refData->path); - return doGit(exportGit(state.store, refData->path, {}, {}, "source")); + return doGit(exportGit(state.store, refData->path, resolvedRef.ref, resolvedRef.rev, "source")); } else abort(); @@ -448,11 +448,15 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLoc + "/" + flake.sourceInfo.resolvedRef.subdir + "/flake.lock"); } + debug("old lock file: %s", oldLockFile); + RefMap refMap; LockFile lockFile(updateLocks( refMap, "", state, flake, handleLockFile, oldLockFile, true).second); + debug("new lock file: %s", lockFile); + if (!(lockFile == oldLockFile)) { if (allowedToWrite(handleLockFile)) { if (auto refData = std::get_if(&topRef.data)) { @@ -524,7 +528,8 @@ static void prim_callFlake(EvalState & state, const Pos & pos, Value * * args, V auto flake = getFlake(state, lazyInput->lockedInput.ref, false); if (flake.sourceInfo.narHash != lazyInput->lockedInput.narHash) - throw Error("the content hash of flake '%s' doesn't match the hash recorded in the referring lockfile", flake.sourceInfo.resolvedRef); + throw Error("the content hash of flake '%s' doesn't match the hash recorded in the referring lockfile", + lazyInput->lockedInput.ref); callFlake(state, flake, lazyInput->lockedInput, v); } else { @@ -532,7 +537,8 @@ static void prim_callFlake(EvalState & state, const Pos & pos, Value * * args, V auto sourceInfo = getNonFlake(state, lazyInput->lockedInput.ref, false, refMap); if (sourceInfo.narHash != lazyInput->lockedInput.narHash) - throw Error("the content hash of repository '%s' doesn't match the hash recorded in the referring lockfile", sourceInfo.resolvedRef); + throw Error("the content hash of repository '%s' doesn't match the hash recorded in the referring lockfile", + lazyInput->lockedInput.ref); state.mkAttrs(v, 8); From a7aabd7cc785bbf34ad29101672677ced18a7fdd Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 21 Oct 2019 16:07:19 +0200 Subject: [PATCH 301/634] Add getDefaultProfile() function --- src/libstore/profiles.cc | 18 ++++++++++++++++++ src/libstore/profiles.hh | 4 ++++ src/nix-env/nix-env.cc | 17 ++--------------- 3 files changed, 24 insertions(+), 15 deletions(-) diff --git a/src/libstore/profiles.cc b/src/libstore/profiles.cc index 4c6af567a..29f6f6c17 100644 --- a/src/libstore/profiles.cc +++ b/src/libstore/profiles.cc @@ -256,4 +256,22 @@ string optimisticLockProfile(const Path & profile) } +Path getDefaultProfile() +{ + Path profileLink = getHome() + "/.nix-profile"; + try { + if (!pathExists(profileLink)) { + replaceSymlink( + getuid() == 0 + ? settings.nixStateDir + "/profiles/default" + : fmt("%s/profiles/per-user/%s/profile", settings.nixStateDir, getUserName()), + profileLink); + } + return absPath(readLink(profileLink), dirOf(profileLink)); + } catch (Error &) { + return profileLink; + } +} + + } diff --git a/src/libstore/profiles.hh b/src/libstore/profiles.hh index 5fa1533de..78645d8b6 100644 --- a/src/libstore/profiles.hh +++ b/src/libstore/profiles.hh @@ -64,4 +64,8 @@ void lockProfile(PathLocks & lock, const Path & profile); rebuilt. */ string optimisticLockProfile(const Path & profile); +/* Resolve ~/.nix-profile. 
If ~/.nix-profile doesn't exist yet, create + it. */ +Path getDefaultProfile(); + } diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index 199dc92aa..5ac0eb87c 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -1427,21 +1427,8 @@ static int _main(int argc, char * * argv) if (globals.profile == "") globals.profile = getEnv("NIX_PROFILE", ""); - if (globals.profile == "") { - Path profileLink = getHome() + "/.nix-profile"; - try { - if (!pathExists(profileLink)) { - replaceSymlink( - getuid() == 0 - ? settings.nixStateDir + "/profiles/default" - : fmt("%s/profiles/per-user/%s/profile", settings.nixStateDir, getUserName()), - profileLink); - } - globals.profile = absPath(readLink(profileLink), dirOf(profileLink)); - } catch (Error &) { - globals.profile = profileLink; - } - } + if (globals.profile == "") + globals.profile = getDefaultProfile(); op(globals, opFlags, opArgs); From b82f75464d1e5ae9a00d8004e5dd7b1ca05059e4 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 21 Oct 2019 17:17:15 +0200 Subject: [PATCH 302/634] buildenv: Eliminate global variables, other cleanup --- src/libstore/build.cc | 1 + src/libstore/builtins.hh | 1 - src/libstore/builtins/buildenv.cc | 128 ++++++++++++++---------------- src/libstore/builtins/buildenv.hh | 21 +++++ 4 files changed, 81 insertions(+), 70 deletions(-) create mode 100644 src/libstore/builtins/buildenv.hh diff --git a/src/libstore/build.cc b/src/libstore/build.cc index cdf848c98..1b27d7af0 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -6,6 +6,7 @@ #include "archive.hh" #include "affinity.hh" #include "builtins.hh" +#include "builtins/buildenv.hh" #include "download.hh" #include "finally.hh" #include "compression.hh" diff --git a/src/libstore/builtins.hh b/src/libstore/builtins.hh index 0d2da873e..f9b5f7900 100644 --- a/src/libstore/builtins.hh +++ b/src/libstore/builtins.hh @@ -6,6 +6,5 @@ namespace nix { // TODO: make pluggable. void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData); -void builtinBuildenv(const BasicDerivation & drv); } diff --git a/src/libstore/builtins/buildenv.cc b/src/libstore/builtins/buildenv.cc index 096593886..c1c85d0bf 100644 --- a/src/libstore/builtins/buildenv.cc +++ b/src/libstore/builtins/buildenv.cc @@ -1,4 +1,4 @@ -#include "builtins.hh" +#include "buildenv.hh" #include #include @@ -7,16 +7,14 @@ namespace nix { -typedef std::map Priorities; - -// FIXME: change into local variables. 
- -static Priorities priorities; - -static unsigned long symlinks; +struct State +{ + std::map priorities; + unsigned long symlinks = 0; +}; /* For each activated package, create symlinks */ -static void createLinks(const Path & srcDir, const Path & dstDir, int priority) +static void createLinks(State & state, const Path & srcDir, const Path & dstDir, int priority) { DirEntries srcFiles; @@ -67,7 +65,7 @@ static void createLinks(const Path & srcDir, const Path & dstDir, int priority) auto res = lstat(dstFile.c_str(), &dstSt); if (res == 0) { if (S_ISDIR(dstSt.st_mode)) { - createLinks(srcFile, dstFile, priority); + createLinks(state, srcFile, dstFile, priority); continue; } else if (S_ISLNK(dstSt.st_mode)) { auto target = canonPath(dstFile, true); @@ -77,8 +75,8 @@ static void createLinks(const Path & srcDir, const Path & dstDir, int priority) throw SysError(format("unlinking '%1%'") % dstFile); if (mkdir(dstFile.c_str(), 0755) == -1) throw SysError(format("creating directory '%1%'")); - createLinks(target, dstFile, priorities[dstFile]); - createLinks(srcFile, dstFile, priority); + createLinks(state, target, dstFile, state.priorities[dstFile]); + createLinks(state, srcFile, dstFile, priority); continue; } } else if (errno != ENOENT) @@ -90,7 +88,7 @@ static void createLinks(const Path & srcDir, const Path & dstDir, int priority) auto res = lstat(dstFile.c_str(), &dstSt); if (res == 0) { if (S_ISLNK(dstSt.st_mode)) { - auto prevPriority = priorities[dstFile]; + auto prevPriority = state.priorities[dstFile]; if (prevPriority == priority) throw Error( "packages '%1%' and '%2%' have the same priority %3%; " @@ -109,67 +107,30 @@ static void createLinks(const Path & srcDir, const Path & dstDir, int priority) } createSymlink(srcFile, dstFile); - priorities[dstFile] = priority; - symlinks++; + state.priorities[dstFile] = priority; + state.symlinks++; } } -typedef std::set FileProp; - -static FileProp done; -static FileProp postponed = FileProp{}; - -static Path out; - -static void addPkg(const Path & pkgDir, int priority) +void buildProfile(const Path & out, Packages && pkgs) { - if (!done.insert(pkgDir).second) return; - createLinks(pkgDir, out, priority); + State state; - try { - for (const auto & p : tokenizeString>( - readFile(pkgDir + "/nix-support/propagated-user-env-packages"), " \n")) - if (!done.count(p)) - postponed.insert(p); - } catch (SysError & e) { - if (e.errNo != ENOENT && e.errNo != ENOTDIR) throw; - } -} + std::set done, postponed; -struct Package { - Path path; - bool active; - int priority; - Package(Path path, bool active, int priority) : path{path}, active{active}, priority{priority} {} -}; + auto addPkg = [&](const Path & pkgDir, int priority) { + if (!done.insert(pkgDir).second) return; + createLinks(state, pkgDir, out, priority); -typedef std::vector Packages; - -void builtinBuildenv(const BasicDerivation & drv) -{ - auto getAttr = [&](const string & name) { - auto i = drv.env.find(name); - if (i == drv.env.end()) throw Error("attribute '%s' missing", name); - return i->second; - }; - - out = getAttr("out"); - createDirs(out); - - /* Convert the stuff we get from the environment back into a - * coherent data type. */ - Packages pkgs; - auto derivations = tokenizeString(getAttr("derivations")); - while (!derivations.empty()) { - /* !!! 
We're trusting the caller to structure derivations env var correctly */ - auto active = derivations.front(); derivations.pop_front(); - auto priority = stoi(derivations.front()); derivations.pop_front(); - auto outputs = stoi(derivations.front()); derivations.pop_front(); - for (auto n = 0; n < outputs; n++) { - auto path = derivations.front(); derivations.pop_front(); - pkgs.emplace_back(path, active != "false", priority); + try { + for (const auto & p : tokenizeString>( + readFile(pkgDir + "/nix-support/propagated-user-env-packages"), " \n")) + if (!done.count(p)) + postponed.insert(p); + } catch (SysError & e) { + if (e.errNo != ENOENT && e.errNo != ENOTDIR) throw; } - } + }; /* Symlink to the packages that have been installed explicitly by the * user. Process in priority order to reduce unnecessary @@ -189,13 +150,42 @@ void builtinBuildenv(const BasicDerivation & drv) */ auto priorityCounter = 1000; while (!postponed.empty()) { - auto pkgDirs = postponed; - postponed = FileProp{}; + std::set pkgDirs; + postponed.swap(pkgDirs); for (const auto & pkgDir : pkgDirs) addPkg(pkgDir, priorityCounter++); } - printError("created %d symlinks in user environment", symlinks); + printError("created %d symlinks in user environment", state.symlinks); +} + +void builtinBuildenv(const BasicDerivation & drv) +{ + auto getAttr = [&](const string & name) { + auto i = drv.env.find(name); + if (i == drv.env.end()) throw Error("attribute '%s' missing", name); + return i->second; + }; + + Path out = getAttr("out"); + createDirs(out); + + /* Convert the stuff we get from the environment back into a + * coherent data type. */ + Packages pkgs; + auto derivations = tokenizeString(getAttr("derivations")); + while (!derivations.empty()) { + /* !!! We're trusting the caller to structure derivations env var correctly */ + auto active = derivations.front(); derivations.pop_front(); + auto priority = stoi(derivations.front()); derivations.pop_front(); + auto outputs = stoi(derivations.front()); derivations.pop_front(); + for (auto n = 0; n < outputs; n++) { + auto path = derivations.front(); derivations.pop_front(); + pkgs.emplace_back(path, active != "false", priority); + } + } + + buildProfile(out, std::move(pkgs)); createSymlink(getAttr("manifest"), out + "/manifest.nix"); } diff --git a/src/libstore/builtins/buildenv.hh b/src/libstore/builtins/buildenv.hh new file mode 100644 index 000000000..0a37459b0 --- /dev/null +++ b/src/libstore/builtins/buildenv.hh @@ -0,0 +1,21 @@ +#pragma once + +#include "derivations.hh" +#include "store-api.hh" + +namespace nix { + +struct Package { + Path path; + bool active; + int priority; + Package(Path path, bool active, int priority) : path{path}, active{active}, priority{priority} {} +}; + +typedef std::vector Packages; + +void buildProfile(const Path & out, Packages && pkgs); + +void builtinBuildenv(const BasicDerivation & drv); + +} From cb1a79a96aa0602383f2fe33702f6adeee708922 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 21 Oct 2019 18:58:38 +0200 Subject: [PATCH 303/634] Fix build --- src/nix/make-content-addressable.cc | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/nix/make-content-addressable.cc b/src/nix/make-content-addressable.cc index 16344ee14..5b99b5084 100644 --- a/src/nix/make-content-addressable.cc +++ b/src/nix/make-content-addressable.cc @@ -11,11 +11,6 @@ struct CmdMakeContentAddressable : StorePathsCommand realiseMode = Build; } - std::string name() override - { - return "make-content-addressable"; - } - std::string 
description() override { return "rewrite a path or closure to content-addressable form"; @@ -92,4 +87,4 @@ struct CmdMakeContentAddressable : StorePathsCommand } }; -static RegisterCommand r1(make_ref()); +static auto r1 = registerCommand("make-content-addressable"); From 45b740c18b196d0326a94df23d08fa3d68e0863f Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 21 Oct 2019 22:11:21 +0200 Subject: [PATCH 304/634] Use upstream json_fwd.hpp to speed up compilation --- src/libexpr/flake/lockfile.cc | 2 + src/libexpr/flake/lockfile.hh | 2 +- src/libstore/build.cc | 2 +- src/libstore/parsed-derivations.cc | 6 ++- src/libstore/parsed-derivations.hh | 10 +++-- src/nlohmann/json_fwd.hpp | 66 +++++++++++++++++++++++++++--- 6 files changed, 75 insertions(+), 13 deletions(-) diff --git a/src/libexpr/flake/lockfile.cc b/src/libexpr/flake/lockfile.cc index 2c3f3c93d..5693e57dc 100644 --- a/src/libexpr/flake/lockfile.cc +++ b/src/libexpr/flake/lockfile.cc @@ -1,6 +1,8 @@ #include "lockfile.hh" #include "store-api.hh" +#include + namespace nix::flake { LockedInput::LockedInput(const nlohmann::json & json) diff --git a/src/libexpr/flake/lockfile.hh b/src/libexpr/flake/lockfile.hh index ab81eac8b..757c37989 100644 --- a/src/libexpr/flake/lockfile.hh +++ b/src/libexpr/flake/lockfile.hh @@ -2,7 +2,7 @@ #include "flakeref.hh" -#include +#include namespace nix { class Store; diff --git a/src/libstore/build.cc b/src/libstore/build.cc index 21b641f2c..8e795e555 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -2521,7 +2521,7 @@ static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*"); void DerivationGoal::writeStructuredAttrs() { - auto & structuredAttrs = parsedDrv->getStructuredAttrs(); + auto structuredAttrs = parsedDrv->getStructuredAttrs(); if (!structuredAttrs) return; auto json = *structuredAttrs; diff --git a/src/libstore/parsed-derivations.cc b/src/libstore/parsed-derivations.cc index 87be8a24e..5553dd863 100644 --- a/src/libstore/parsed-derivations.cc +++ b/src/libstore/parsed-derivations.cc @@ -1,5 +1,7 @@ #include "parsed-derivations.hh" +#include + namespace nix { ParsedDerivation::ParsedDerivation(const Path & drvPath, BasicDerivation & drv) @@ -9,13 +11,15 @@ ParsedDerivation::ParsedDerivation(const Path & drvPath, BasicDerivation & drv) auto jsonAttr = drv.env.find("__json"); if (jsonAttr != drv.env.end()) { try { - structuredAttrs = nlohmann::json::parse(jsonAttr->second); + structuredAttrs = std::make_unique(nlohmann::json::parse(jsonAttr->second)); } catch (std::exception & e) { throw Error("cannot process __json attribute of '%s': %s", drvPath, e.what()); } } } +ParsedDerivation::~ParsedDerivation() { } + std::optional ParsedDerivation::getStringAttr(const std::string & name) const { if (structuredAttrs) { diff --git a/src/libstore/parsed-derivations.hh b/src/libstore/parsed-derivations.hh index 9bde4b4dc..6e67e1665 100644 --- a/src/libstore/parsed-derivations.hh +++ b/src/libstore/parsed-derivations.hh @@ -1,6 +1,6 @@ #include "derivations.hh" -#include +#include namespace nix { @@ -8,15 +8,17 @@ class ParsedDerivation { Path drvPath; BasicDerivation & drv; - std::optional structuredAttrs; + std::unique_ptr structuredAttrs; public: ParsedDerivation(const Path & drvPath, BasicDerivation & drv); - const std::optional & getStructuredAttrs() const + ~ParsedDerivation(); + + const nlohmann::json * getStructuredAttrs() const { - return structuredAttrs; + return structuredAttrs.get(); } std::optional getStringAttr(const std::string & name) const; diff --git 
a/src/nlohmann/json_fwd.hpp b/src/nlohmann/json_fwd.hpp index ae6e4c64f..28fd10d45 100644 --- a/src/nlohmann/json_fwd.hpp +++ b/src/nlohmann/json_fwd.hpp @@ -1,10 +1,64 @@ -#pragma once +#ifndef INCLUDE_NLOHMANN_JSON_FWD_HPP_ +#define INCLUDE_NLOHMANN_JSON_FWD_HPP_ -namespace nlohmann { +#include // int64_t, uint64_t +#include // map +#include // allocator +#include // string +#include // vector -struct json : basic_json<> +/*! +@brief namespace for Niels Lohmann +@see https://github.com/nlohmann +@since version 1.0.0 +*/ +namespace nlohmann { - using basic_json<>::basic_json; -}; +/*! +@brief default JSONSerializer template argument -} +This serializer ignores the template arguments and uses ADL +([argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl)) +for serialization. +*/ +template +struct adl_serializer; + +template class ObjectType = + std::map, + template class ArrayType = std::vector, + class StringType = std::string, class BooleanType = bool, + class NumberIntegerType = std::int64_t, + class NumberUnsignedType = std::uint64_t, + class NumberFloatType = double, + template class AllocatorType = std::allocator, + template class JSONSerializer = + adl_serializer> +class basic_json; + +/*! +@brief JSON Pointer + +A JSON pointer defines a string syntax for identifying a specific value +within a JSON document. It can be used with functions `at` and +`operator[]`. Furthermore, JSON pointers are the base for JSON patches. + +@sa [RFC 6901](https://tools.ietf.org/html/rfc6901) + +@since version 2.0.0 +*/ +template +class json_pointer; + +/*! +@brief default JSON class + +This type is the default specialization of the @ref basic_json class which +uses the standard template types. + +@since version 1.0.0 +*/ +using json = basic_json<>; +} // namespace nlohmann + +#endif // INCLUDE_NLOHMANN_JSON_FWD_HPP_ From 1e23b82a5303fa9f22f8943c0c3f3a65bc5eb9a9 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 21 Oct 2019 23:14:29 +0200 Subject: [PATCH 305/634] exportGitHub(): Don't rely on the ETag from GitHub We relied on it being the Git revision, but that stopped being the case. --- src/libexpr/primops/fetchGit.cc | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index 50277672c..e8d87ffa4 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -286,11 +286,21 @@ GitInfo exportGitHub( return *gitInfo; } + if (!rev) { + auto url = fmt("https://api.github.com/repos/%s/%s/commits/%s", + owner, repo, ref ? *ref : "master"); + CachedDownloadRequest request(url); + request.ttl = rev ? 1000000000 : settings.tarballTtl; + auto result = getDownloader()->downloadCached(store, request); + auto json = nlohmann::json::parse(readFile(result.path)); + rev = Hash(json["sha"], htSHA1); + } + // FIXME: use regular /archive URLs instead? api.github.com // might have stricter rate limits. auto url = fmt("https://api.github.com/repos/%s/%s/tarball/%s", - owner, repo, rev ? rev->to_string(Base16, false) : ref ? *ref : "master"); + owner, repo, rev->to_string(Base16, false)); std::string accessToken = settings.githubAccessToken.get(); if (accessToken != "") @@ -299,21 +309,15 @@ GitInfo exportGitHub( CachedDownloadRequest request(url); request.unpack = true; request.name = "source"; - request.ttl = rev ? 
1000000000 : settings.tarballTtl; + request.ttl = 1000000000; request.getLastModified = true; auto result = getDownloader()->downloadCached(store, request); - if (!result.etag) - throw Error("did not receive an ETag header from '%s'", url); - - if (result.etag->size() != 42 || (*result.etag)[0] != '"' || (*result.etag)[41] != '"') - throw Error("ETag header '%s' from '%s' is not a Git revision", *result.etag, url); - assert(result.lastModified); GitInfo gitInfo; gitInfo.storePath = result.storePath; - gitInfo.rev = Hash(std::string(*result.etag, 1, result.etag->size() - 2), htSHA1); + gitInfo.rev = *rev; gitInfo.lastModified = *result.lastModified; // FIXME: this can overwrite a cache file that contains a revCount. From 91a88f3acba8978d17ca40fd64e381514a51244d Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 21 Oct 2019 23:38:07 +0200 Subject: [PATCH 306/634] Fix "nixpkgs." compatibility --- src/nix/installables.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nix/installables.cc b/src/nix/installables.cc index bc8fbeb8f..38f37adb1 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -409,7 +409,7 @@ std::vector> SourceExprCommand::parseInstallables( bool static warned; warnOnce(warned, "the syntax 'nixpkgs.' is deprecated; use 'nixpkgs:' instead"); result.push_back(std::make_shared(*this, FlakeRef("nixpkgs"), - Strings{"legacyPackages." + std::string(s, 8)})); + Strings{"legacyPackages." + settings.thisSystem.get() + "." + std::string(s, 8)})); } else if ((hash = s.rfind('#')) != std::string::npos) From ce279209363cb6a9cbbe68a13fab9d8550b721f3 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 22 Oct 2019 00:21:58 +0200 Subject: [PATCH 307/634] Add start of 'nix profile' command --- src/nix/command.cc | 5 + src/nix/command.hh | 5 + src/nix/flake.cc | 1 + src/nix/installables.cc | 11 +- src/nix/installables.hh | 3 + src/nix/profile.cc | 234 ++++++++++++++++++++++++++++++++++++++++ 6 files changed, 256 insertions(+), 3 deletions(-) create mode 100644 src/nix/profile.cc diff --git a/src/nix/command.cc b/src/nix/command.cc index 57f3754cc..1cb4cc92a 100644 --- a/src/nix/command.cc +++ b/src/nix/command.cc @@ -123,4 +123,9 @@ void MixProfile::updateProfile(const Buildables & buildables) updateProfile(*result); } +MixDefaultProfile::MixDefaultProfile() +{ + profile = getDefaultProfile(); +} + } diff --git a/src/nix/command.hh b/src/nix/command.hh index 546c27a71..ef29381cf 100644 --- a/src/nix/command.hh +++ b/src/nix/command.hh @@ -188,4 +188,9 @@ struct MixProfile : virtual Args, virtual StoreCommand void updateProfile(const Buildables & buildables); }; +struct MixDefaultProfile : MixProfile +{ + MixDefaultProfile(); +}; + } diff --git a/src/nix/flake.cc b/src/nix/flake.cc index d928af3b9..6e7c5e2eb 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -653,6 +653,7 @@ struct CmdFlake : virtual MultiCommand, virtual Command { if (!command) throw UsageError("'nix flake' requires a sub-command."); + command->prepare(); command->run(); } diff --git a/src/nix/installables.cc b/src/nix/installables.cc index 38f37adb1..671cf513a 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -294,7 +294,7 @@ Value * InstallableFlake::getFlakeOutputs(EvalState & state, const flake::Resolv return (*aOutputs)->value; } -std::vector InstallableFlake::toDerivations() +std::tuple InstallableFlake::toDerivation() { auto state = cmd.getEvalState(); @@ -312,7 +312,7 @@ std::vector InstallableFlake::toDerivations() auto drv = 
evalCache.getDerivation(fingerprint, attrPath); if (drv) { if (state->store->isValidPath(drv->drvPath)) - return {*drv}; + return {attrPath, resFlake.flake.sourceInfo.resolvedRef, *drv}; } if (!vOutputs) @@ -334,7 +334,7 @@ std::vector InstallableFlake::toDerivations() evalCache.addDerivation(fingerprint, attrPath, drv); - return {drv}; + return {attrPath, resFlake.flake.sourceInfo.resolvedRef, drv}; } catch (AttrPathNotFound & e) { } } @@ -343,6 +343,11 @@ std::vector InstallableFlake::toDerivations() flakeRef, concatStringsSep(", ", quoteStrings(attrPaths))); } +std::vector InstallableFlake::toDerivations() +{ + return {std::get<2>(toDerivation())}; +} + Value * InstallableFlake::toValue(EvalState & state) { auto resFlake = resolveFlake(state, flakeRef, cmd.getLockFileMode()); diff --git a/src/nix/installables.hh b/src/nix/installables.hh index a635cb96f..9388c673e 100644 --- a/src/nix/installables.hh +++ b/src/nix/installables.hh @@ -8,6 +8,7 @@ namespace nix { struct Value; +struct DrvInfo; class EvalState; class SourceExprCommand; @@ -89,6 +90,8 @@ struct InstallableFlake : InstallableValue Value * getFlakeOutputs(EvalState & state, const flake::ResolvedFlake & resFlake); + std::tuple toDerivation(); + std::vector toDerivations() override; Value * toValue(EvalState & state) override; diff --git a/src/nix/profile.cc b/src/nix/profile.cc new file mode 100644 index 000000000..bc5c3870e --- /dev/null +++ b/src/nix/profile.cc @@ -0,0 +1,234 @@ +#include "command.hh" +#include "common-args.hh" +#include "shared.hh" +#include "store-api.hh" +#include "derivations.hh" +#include "archive.hh" +#include "builtins/buildenv.hh" +#include "flake/flakeref.hh" + +#include + +using namespace nix; + +struct ProfileElementSource +{ + FlakeRef originalRef; + FlakeRef resolvedRef; + std::string attrPath; + // FIXME: output names +}; + +struct ProfileElement +{ + PathSet storePaths; + std::optional source; + bool active = true; + // FIXME: priority +}; + +struct ProfileManifest +{ + std::vector elements; + + ProfileManifest(const Path & profile) + { + auto manifestPath = profile + "/manifest.json"; + + if (pathExists(manifestPath)) { + auto json = nlohmann::json::parse(readFile(manifestPath)); + + auto version = json.value("version", 0); + if (version != 1) + throw Error("profile manifest '%s' has unsupported version %d", manifestPath, version); + + for (auto & e : json["elements"]) { + ProfileElement element; + for (auto & p : e["storePaths"]) + element.storePaths.insert((std::string) p); + element.active = e["active"]; + if (e.value("uri", "") != "") { + element.source = ProfileElementSource{ + FlakeRef(e["originalUri"]), + FlakeRef(e["uri"]), + e["attrPath"] + }; + } + elements.emplace_back(std::move(element)); + } + } + } + + std::string toJSON() const + { + auto array = nlohmann::json::array(); + for (auto & element : elements) { + auto paths = nlohmann::json::array(); + for (auto & path : element.storePaths) + paths.push_back(path); + nlohmann::json obj; + obj["storePaths"] = paths; + obj["active"] = element.active; + if (element.source) { + obj["originalUri"] = element.source->originalRef.to_string(); + obj["uri"] = element.source->resolvedRef.to_string(); + obj["attrPath"] = element.source->attrPath; + } + array.push_back(obj); + } + nlohmann::json json; + json["version"] = 1; + json["elements"] = array; + return json.dump(); + } + + Path build(ref store) + { + auto tempDir = createTempDir(); + + ValidPathInfo info; + + Packages pkgs; + for (auto & element : elements) { + for (auto & path : 
element.storePaths) { + if (element.active) + pkgs.emplace_back(path, true, 5); + info.references.insert(path); + } + } + + buildProfile(tempDir, std::move(pkgs)); + + writeFile(tempDir + "/manifest.json", toJSON()); + + /* Add the symlink tree to the store. */ + StringSink sink; + dumpPath(tempDir, sink); + + info.narHash = hashString(htSHA256, *sink.s); + info.narSize = sink.s->size(); + info.path = store->makeFixedOutputPath(true, info.narHash, "profile", info.references); + info.ca = makeFixedOutputCA(true, info.narHash); + + store->addToStore(info, sink.s); + + return info.path; + } +}; + +struct CmdProfileInstall : InstallablesCommand, MixDefaultProfile +{ + std::string description() override + { + return "install a package into a profile"; + } + + Examples examples() override + { + return { + Example{ + "To install a package from Nixpkgs:", + "nix profile install nixpkgs#hello" + }, + Example{ + "To install a package from a specific branch of Nixpkgs:", + "nix profile install nixpkgs/release-19.09#hello" + }, + Example{ + "To install a package from a specific revision of Nixpkgs:", + "nix profile install nixpkgs/1028bb33859f8dfad7f98e1c8d185f3d1aaa7340#hello" + }, + }; + } + + void run(ref store) override + { + ProfileManifest manifest(*profile); + + PathSet pathsToBuild; + + for (auto & installable : installables) { + if (auto installable2 = std::dynamic_pointer_cast(installable)) { + auto [attrPath, resolvedRef, drv] = installable2->toDerivation(); + + ProfileElement element; + element.storePaths = {drv.outPath}; // FIXME + element.source = ProfileElementSource{ + installable2->flakeRef, + resolvedRef, + attrPath, + }; + + pathsToBuild.insert(makeDrvPathWithOutputs(drv.drvPath, {"out"})); // FIXME + + manifest.elements.emplace_back(std::move(element)); + } else + throw Error("'nix profile install' does not support argument '%s'", installable->what()); + } + + store->buildPaths(pathsToBuild); + + updateProfile(manifest.build(store)); + } +}; + +struct CmdProfileInfo : virtual StoreCommand, MixDefaultProfile +{ + std::string description() override + { + return "info"; + } + + Examples examples() override + { + return { + Example{ + "To show what packages are installed in the default profile:", + "nix profile info" + }, + }; + } + + void run(ref store) override + { + ProfileManifest manifest(*profile); + + for (auto & element : manifest.elements) { + std::cout << fmt("%s %s\n", + element.source ? element.source->originalRef.to_string() + "#" + element.source->attrPath : "-", + element.source ? 
element.source->resolvedRef.to_string() + "#" + element.source->attrPath : "-", + concatStringsSep(" ", element.storePaths)); + } + } +}; + +struct CmdProfile : virtual MultiCommand, virtual Command +{ + CmdProfile() + : MultiCommand({ + {"install", []() { return make_ref(); }}, + {"info", []() { return make_ref(); }}, + }) + { } + + std::string description() override + { + return "manage Nix profiles"; + } + + void run() override + { + if (!command) + throw UsageError("'nix profile' requires a sub-command."); + command->prepare(); + command->run(); + } + + void printHelp(const string & programName, std::ostream & out) override + { + MultiCommand::printHelp(programName, out); + } +}; + +static auto r1 = registerCommand("profile"); + From 555ca59f2b34bb8f3e738789e9548895766609cf Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 22 Oct 2019 00:28:16 +0200 Subject: [PATCH 308/634] nix profile info: Index elements --- src/nix/profile.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/nix/profile.cc b/src/nix/profile.cc index bc5c3870e..2303900c0 100644 --- a/src/nix/profile.cc +++ b/src/nix/profile.cc @@ -14,6 +14,7 @@ using namespace nix; struct ProfileElementSource { FlakeRef originalRef; + // FIXME: record original attrpath. FlakeRef resolvedRef; std::string attrPath; // FIXME: output names @@ -193,8 +194,9 @@ struct CmdProfileInfo : virtual StoreCommand, MixDefaultProfile { ProfileManifest manifest(*profile); - for (auto & element : manifest.elements) { - std::cout << fmt("%s %s\n", + for (size_t i = 0; i < manifest.elements.size(); ++i) { + auto & element(manifest.elements[i]); + std::cout << fmt("%d %s %s\n", i, element.source ? element.source->originalRef.to_string() + "#" + element.source->attrPath : "-", element.source ? 
element.source->resolvedRef.to_string() + "#" + element.source->attrPath : "-", concatStringsSep(" ", element.storePaths)); From e30a0155d47a2e8a2eb9d0801b8b1602f71c5fd7 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 22 Oct 2019 13:06:32 +0200 Subject: [PATCH 309/634] Add "nix profile remove" command --- src/libstore/builtins/buildenv.cc | 2 +- src/nix/profile.cc | 109 +++++++++++++++++++++++++++++- 2 files changed, 108 insertions(+), 3 deletions(-) diff --git a/src/libstore/builtins/buildenv.cc b/src/libstore/builtins/buildenv.cc index c1c85d0bf..1b802d908 100644 --- a/src/libstore/builtins/buildenv.cc +++ b/src/libstore/builtins/buildenv.cc @@ -156,7 +156,7 @@ void buildProfile(const Path & out, Packages && pkgs) addPkg(pkgDir, priorityCounter++); } - printError("created %d symlinks in user environment", state.symlinks); + debug("created %d symlinks in user environment", state.symlinks); } void builtinBuildenv(const BasicDerivation & drv) diff --git a/src/nix/profile.cc b/src/nix/profile.cc index 2303900c0..c9a6a5355 100644 --- a/src/nix/profile.cc +++ b/src/nix/profile.cc @@ -8,6 +8,7 @@ #include "flake/flakeref.hh" #include +#include using namespace nix; @@ -32,6 +33,8 @@ struct ProfileManifest { std::vector elements; + ProfileManifest() { } + ProfileManifest(const Path & profile) { auto manifestPath = profile + "/manifest.json"; @@ -173,11 +176,112 @@ struct CmdProfileInstall : InstallablesCommand, MixDefaultProfile } }; +class MixProfileElementMatchers : virtual Args +{ + std::vector _matchers; + +public: + + MixProfileElementMatchers() + { + expectArgs("elements", &_matchers); + } + + typedef std::variant Matcher; + + std::vector getMatchers(ref store) + { + std::vector res; + + for (auto & s : _matchers) { + size_t n; + if (string2Int(s, n)) + res.push_back(n); + else if (store->isStorePath(s)) + res.push_back(s); + else + res.push_back(std::regex(s, std::regex::extended | std::regex::icase)); + } + + return res; + } + + bool matches(const ProfileElement & element, size_t pos, std::vector matchers) + { + for (auto & matcher : matchers) { + if (auto n = std::get_if(&matcher)) { + if (*n == pos) return true; + } else if (auto path = std::get_if(&matcher)) { + if (element.storePaths.count(*path)) return true; + } else if (auto regex = std::get_if(&matcher)) { + if (element.source + && std::regex_match(element.source->attrPath, *regex)) + return true; + } + } + + return false; + } +}; + +struct CmdProfileRemove : virtual StoreCommand, MixDefaultProfile, MixProfileElementMatchers +{ + std::string description() override + { + return "remove packages from a profile"; + } + + Examples examples() override + { + return { + Example{ + "To remove a package by attribute path:", + "nix profile remove packages.x86_64-linux.hello" + }, + Example{ + "To remove all package:", + "nix profile remove '.*'" + }, + Example{ + "To remove a package by store path:", + "nix profile remove /nix/store/rr3y0c6zyk7kjjl8y19s4lsrhn4aiq1z-hello-2.10" + }, + Example{ + "To remove a package by position:", + "nix profile remove 3" + }, + }; + } + + void run(ref store) override + { + ProfileManifest oldManifest(*profile); + + auto matchers = getMatchers(store); + + ProfileManifest newManifest; + + for (size_t i = 0; i < oldManifest.elements.size(); ++i) { + auto & element(oldManifest.elements[i]); + if (!matches(element, i, matchers)) + newManifest.elements.push_back(element); + } + + // FIXME: warn about unused matchers? 
+ + printInfo("removed %d packages, kept %d packages", + oldManifest.elements.size() - newManifest.elements.size(), + newManifest.elements.size()); + + updateProfile(newManifest.build(store)); + } +}; + struct CmdProfileInfo : virtual StoreCommand, MixDefaultProfile { std::string description() override { - return "info"; + return "list installed packages"; } Examples examples() override @@ -196,7 +300,7 @@ struct CmdProfileInfo : virtual StoreCommand, MixDefaultProfile for (size_t i = 0; i < manifest.elements.size(); ++i) { auto & element(manifest.elements[i]); - std::cout << fmt("%d %s %s\n", i, + std::cout << fmt("%d %s %s %s\n", i, element.source ? element.source->originalRef.to_string() + "#" + element.source->attrPath : "-", element.source ? element.source->resolvedRef.to_string() + "#" + element.source->attrPath : "-", concatStringsSep(" ", element.storePaths)); @@ -209,6 +313,7 @@ struct CmdProfile : virtual MultiCommand, virtual Command CmdProfile() : MultiCommand({ {"install", []() { return make_ref(); }}, + {"remove", []() { return make_ref(); }}, {"info", []() { return make_ref(); }}, }) { } From af786432c53a0eb4f7a0aea2d0faf5f0655cbb05 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 22 Oct 2019 14:44:51 +0200 Subject: [PATCH 310/634] Add "nix profile upgrade" command --- src/nix/profile.cc | 68 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 67 insertions(+), 1 deletion(-) diff --git a/src/nix/profile.cc b/src/nix/profile.cc index c9a6a5355..8d387ef2e 100644 --- a/src/nix/profile.cc +++ b/src/nix/profile.cc @@ -239,7 +239,7 @@ struct CmdProfileRemove : virtual StoreCommand, MixDefaultProfile, MixProfileEle "nix profile remove packages.x86_64-linux.hello" }, Example{ - "To remove all package:", + "To remove all packages:", "nix profile remove '.*'" }, Example{ @@ -277,6 +277,71 @@ struct CmdProfileRemove : virtual StoreCommand, MixDefaultProfile, MixProfileEle } }; +struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProfileElementMatchers +{ + std::string description() override + { + return "upgrade packages using their most recent flake"; + } + + Examples examples() override + { + return { + Example{ + "To upgrade all packages that were installed using a mutable flake reference:", + "nix profile upgrade '.*'" + }, + Example{ + "To upgrade a specific package:", + "nix profile upgrade packages.x86_64-linux.hello" + }, + }; + } + + void run(ref store) override + { + ProfileManifest manifest(*profile); + + auto matchers = getMatchers(store); + + // FIXME: code duplication + PathSet pathsToBuild; + + for (size_t i = 0; i < manifest.elements.size(); ++i) { + auto & element(manifest.elements[i]); + if (element.source + && !element.source->originalRef.isImmutable() + && matches(element, i, matchers)) + { + Activity act(*logger, lvlChatty, actUnknown, + fmt("checking '%s' for updates", element.source->attrPath)); + + InstallableFlake installable(*this, FlakeRef(element.source->originalRef), {element.source->attrPath}); + + auto [attrPath, resolvedRef, drv] = installable.toDerivation(); + + if (element.source->resolvedRef == resolvedRef) continue; + + printInfo("upgrading '%s' from flake '%s' to '%s'", + element.source->attrPath, element.source->resolvedRef, resolvedRef); + + element.storePaths = {drv.outPath}; // FIXME + element.source = ProfileElementSource{ + installable.flakeRef, + resolvedRef, + attrPath, + }; + + pathsToBuild.insert(makeDrvPathWithOutputs(drv.drvPath, {"out"})); // FIXME + } + } + + store->buildPaths(pathsToBuild); + 
From af786432c53a0eb4f7a0aea2d0faf5f0655cbb05 Mon Sep 17 00:00:00 2001
From: Eelco Dolstra
Date: Tue, 22 Oct 2019 14:44:51 +0200
Subject: [PATCH 310/634] Add "nix profile upgrade" command

---
 src/nix/profile.cc | 68 +++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 67 insertions(+), 1 deletion(-)

diff --git a/src/nix/profile.cc b/src/nix/profile.cc
index c9a6a5355..8d387ef2e 100644
--- a/src/nix/profile.cc
+++ b/src/nix/profile.cc
@@ -239,7 +239,7 @@ struct CmdProfileRemove : virtual StoreCommand, MixDefaultProfile, MixProfileEle
                 "nix profile remove packages.x86_64-linux.hello"
             },
             Example{
-                "To remove all package:",
+                "To remove all packages:",
                 "nix profile remove '.*'"
             },
             Example{
@@ -277,6 +277,71 @@ struct CmdProfileRemove : virtual StoreCommand, MixDefaultProfile, MixProfileEle
     }
 };
 
+struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProfileElementMatchers
+{
+    std::string description() override
+    {
+        return "upgrade packages using their most recent flake";
+    }
+
+    Examples examples() override
+    {
+        return {
+            Example{
+                "To upgrade all packages that were installed using a mutable flake reference:",
+                "nix profile upgrade '.*'"
+            },
+            Example{
+                "To upgrade a specific package:",
+                "nix profile upgrade packages.x86_64-linux.hello"
+            },
+        };
+    }
+
+    void run(ref<Store> store) override
+    {
+        ProfileManifest manifest(*profile);
+
+        auto matchers = getMatchers(store);
+
+        // FIXME: code duplication
+        PathSet pathsToBuild;
+
+        for (size_t i = 0; i < manifest.elements.size(); ++i) {
+            auto & element(manifest.elements[i]);
+            if (element.source
+                && !element.source->originalRef.isImmutable()
+                && matches(element, i, matchers))
+            {
+                Activity act(*logger, lvlChatty, actUnknown,
+                    fmt("checking '%s' for updates", element.source->attrPath));
+
+                InstallableFlake installable(*this, FlakeRef(element.source->originalRef), {element.source->attrPath});
+
+                auto [attrPath, resolvedRef, drv] = installable.toDerivation();
+
+                if (element.source->resolvedRef == resolvedRef) continue;
+
+                printInfo("upgrading '%s' from flake '%s' to '%s'",
+                    element.source->attrPath, element.source->resolvedRef, resolvedRef);
+
+                element.storePaths = {drv.outPath}; // FIXME
+                element.source = ProfileElementSource{
+                    installable.flakeRef,
+                    resolvedRef,
+                    attrPath,
+                };
+
+                pathsToBuild.insert(makeDrvPathWithOutputs(drv.drvPath, {"out"})); // FIXME
+            }
+        }
+
+        store->buildPaths(pathsToBuild);
+
+        updateProfile(manifest.build(store));
+    }
+};
+
 struct CmdProfileInfo : virtual StoreCommand, MixDefaultProfile
 {
     std::string description() override
@@ -314,6 +379,7 @@ struct CmdProfile : virtual MultiCommand, virtual Command
         : MultiCommand({
               {"install", []() { return make_ref<CmdProfileInstall>(); }},
              {"remove", []() { return make_ref<CmdProfileRemove>(); }},
+              {"upgrade", []() { return make_ref<CmdProfileUpgrade>(); }},
              {"info", []() { return make_ref<CmdProfileInfo>(); }},
           })
     { }

From ac9b427541cdc36fb696d6fe413872e12151ec3d Mon Sep 17 00:00:00 2001
From: Eelco Dolstra
Date: Tue, 22 Oct 2019 15:16:57 +0200
Subject: [PATCH 311/634] Convert old-style profile manifest

---
 src/nix/profile.cc | 31 ++++++++++++++++++++++++-------
 1 file changed, 24 insertions(+), 7 deletions(-)

diff --git a/src/nix/profile.cc b/src/nix/profile.cc
index 8d387ef2e..786ebddef 100644
--- a/src/nix/profile.cc
+++ b/src/nix/profile.cc
@@ -6,6 +6,7 @@
 #include "archive.hh"
 #include "builtins/buildenv.hh"
 #include "flake/flakeref.hh"
+#include "nix-env/user-env.hh"
 
 #include
 #include
@@ -35,7 +36,7 @@ struct ProfileManifest
 
     ProfileManifest() { }
 
-    ProfileManifest(const Path & profile)
+    ProfileManifest(EvalState & state, const Path & profile)
     {
         auto manifestPath = profile + "/manifest.json";
 
@@ -61,6 +62,22 @@ struct ProfileManifest
                 elements.emplace_back(std::move(element));
             }
         }
+
+        else if (pathExists(profile + "/manifest.nix")) {
+            // FIXME: needed because of pure mode; ugly.
+            if (state.allowedPaths) {
+                state.allowedPaths->insert(state.store->followLinksToStore(profile));
+                state.allowedPaths->insert(state.store->followLinksToStore(profile + "/manifest.nix"));
+            }
+
+            auto drvInfos = queryInstalled(state, state.store->followLinksToStore(profile));
+
+            for (auto & drvInfo : drvInfos) {
+                ProfileElement element;
+                element.storePaths = {drvInfo.queryOutPath()};
+                elements.emplace_back(std::move(element));
+            }
+        }
     }
 
     std::string toJSON() const
@@ -147,7 +164,7 @@ struct CmdProfileInstall : InstallablesCommand, MixDefaultProfile
 
     void run(ref<Store> store) override
     {
-        ProfileManifest manifest(*profile);
+        ProfileManifest manifest(*getEvalState(), *profile);
 
         PathSet pathsToBuild;
 
@@ -224,7 +241,7 @@ public:
     }
 };
 
-struct CmdProfileRemove : virtual StoreCommand, MixDefaultProfile, MixProfileElementMatchers
+struct CmdProfileRemove : virtual EvalCommand, MixDefaultProfile, MixProfileElementMatchers
 {
     std::string description() override
     {
@@ -255,7 +272,7 @@ struct CmdProfileRemove : virtual StoreCommand, MixDefaultProfile, MixProfileEle
 
     void run(ref<Store> store) override
     {
-        ProfileManifest oldManifest(*profile);
+        ProfileManifest oldManifest(*getEvalState(), *profile);
 
         auto matchers = getMatchers(store);
 
@@ -300,7 +317,7 @@ struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProf
 
     void run(ref<Store> store) override
     {
-        ProfileManifest manifest(*profile);
+        ProfileManifest manifest(*getEvalState(), *profile);
 
         auto matchers = getMatchers(store);
 
@@ -342,7 +359,7 @@ struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProf
     }
 };
 
-struct CmdProfileInfo : virtual StoreCommand, MixDefaultProfile
+struct CmdProfileInfo : virtual EvalCommand, virtual StoreCommand, MixDefaultProfile
 {
     std::string description() override
     {
@@ -361,7 +378,7 @@ struct CmdProfileInfo : virtual StoreCommand, MixDefaultProfile
 
     void run(ref<Store> store) override
     {
-        ProfileManifest manifest(*profile);
+        ProfileManifest manifest(*getEvalState(), *profile);
 
         for (size_t i = 0; i < manifest.elements.size(); ++i) {
             auto & element(manifest.elements[i]);
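ProfileManifest above reads and writes manifest.json through nlohmann::json, and the next patch replaces the vendored copy of that library with the upstream package. The following is a small, self-contained round-trip sketch; the key names ("version", "elements", "storePaths", "attrPath") are illustrative assumptions here, not necessarily the exact schema Nix writes:

    // Sketch only: serialise and re-parse a manifest-like structure with
    // nlohmann::json. The calls are the same whether the header comes from
    // the vendored copy or from an upstream nlohmann_json package.
    #include <iostream>
    #include <string>
    #include <vector>

    #include <nlohmann/json.hpp>

    struct Element
    {
        std::vector<std::string> storePaths;
        std::string attrPath; // empty for entries imported from an old-style manifest.nix
    };

    int main()
    {
        std::vector<Element> elements{
            {{"/nix/store/...-hello-2.10"}, "packages.x86_64-linux.hello"},
            {{"/nix/store/...-cowsay-3.03"}, ""} // converted old-style entry: store paths only
        };

        nlohmann::json manifest;
        manifest["version"] = 1;
        for (auto & e : elements) {
            nlohmann::json el = {
                {"storePaths", e.storePaths},
                {"attrPath", e.attrPath}
            };
            manifest["elements"].push_back(el);
        }

        auto s = manifest.dump(2);
        std::cout << s << "\n";

        auto parsed = nlohmann::json::parse(s);
        std::cout << parsed["elements"].size() << " element(s)\n";
    }

Switching to the upstream library only changes where nlohmann/json.hpp comes from, which is why the next patch merely deletes the vendored headers and adds nlohmann_json to the build inputs in flake.nix.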
From 9cac895406724e0304dff140379783c4d786e855 Mon Sep 17 00:00:00 2001
From: Eelco Dolstra
Date: Tue, 22 Oct 2019 20:12:23 +0200
Subject: [PATCH 312/634] Use upstream nlohmann_json

---
 flake.lock                |     6 +-
 flake.nix                 |     5 +-
 src/nlohmann/json.hpp     | 20842 ------------------------------------
 src/nlohmann/json_fwd.hpp |    64 -
 4 files changed, 6 insertions(+), 20911 deletions(-)
 delete mode 100644 src/nlohmann/json.hpp
 delete mode 100644 src/nlohmann/json_fwd.hpp

diff --git a/flake.lock b/flake.lock
index 05e3a6e25..10c9921bf 100644
--- a/flake.lock
+++ b/flake.lock
@@ -2,9 +2,9 @@
     "inputs": {
         "nixpkgs": {
             "inputs": {},
-            "narHash": "sha256-ltGlDPfwicH/u4orj1n4JXgRsA+jvKQsGnekObi0TV4=",
-            "originalUri": "nixpkgs/release-19.03",
-            "uri": "github:edolstra/nixpkgs/9a593b575e4044f9aff939b512e7cb1cf1e76a65"
+            "narHash": "sha256-D1YSQzR6Xo2cNZ4sf+WCak2fqE/CIbwpZbPKd2B2Syc=",
+            "originalUrl": "nixpkgs/release-19.09",
+            "url": "github:edolstra/nixpkgs/e3d44c4fc62c99b850c792429cb61b946b66a704"
         }
     },
     "version": 3
diff --git a/flake.nix b/flake.nix
index 2f0073cc8..a16ef4656 100644
--- a/flake.nix
+++ b/flake.nix
@@ -3,7 +3,7 @@
 
   edition = 201909;
 
-  inputs.nixpkgs.uri = "nixpkgs/release-19.03";
+  inputs.nixpkgs.uri = "nixpkgs/release-19.09";
 
   outputs = { self, nixpkgs }:
@@ -73,6 +73,7 @@
             bzip2 xz brotli editline
             openssl pkgconfig sqlite boehmgc
             boost
+            nlohmann_json
 
             # Tests
             git
@@ -313,7 +314,7 @@
 
           doInstallCheck = true;
 
-          lcovFilter = [ "*/boost/*" "*-tab.*" "*/nlohmann/*" "*/linenoise/*" ];
+          lcovFilter = [ "*/boost/*" "*-tab.*" "*/linenoise/*" ];
 
           # We call `dot', and even though we just use it to
           # syntax-check generated dot files, it still requires some
diff --git a/src/nlohmann/json.hpp b/src/nlohmann/json.hpp
deleted file mode 100644
index 5003a4fa2..000000000
--- a/src/nlohmann/json.hpp
+++ /dev/null
@@ -1,20842 +0,0 @@
-/*
-    __ _____ _____ _____
- __|  |   __|     |   | |  JSON for Modern C++
-|  |  |__   |  |  | | | |  version 3.6.1
-|_____|_____|_____|_|___|  https://github.com/nlohmann/json
-
-Licensed under the MIT License .
-SPDX-License-Identifier: MIT
-Copyright (c) 2013-2019 Niels Lohmann .
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-*/ - -#ifndef INCLUDE_NLOHMANN_JSON_HPP_ -#define INCLUDE_NLOHMANN_JSON_HPP_ - -#define NLOHMANN_JSON_VERSION_MAJOR 3 -#define NLOHMANN_JSON_VERSION_MINOR 6 -#define NLOHMANN_JSON_VERSION_PATCH 1 - -#include // all_of, find, for_each -#include // assert -#include // and, not, or -#include // nullptr_t, ptrdiff_t, size_t -#include // hash, less -#include // initializer_list -#include // istream, ostream -#include // random_access_iterator_tag -#include // unique_ptr -#include // accumulate -#include // string, stoi, to_string -#include // declval, forward, move, pair, swap -#include // vector - -// #include - - -#include - -// #include - - -#include // transform -#include // array -#include // and, not -#include // forward_list -#include // inserter, front_inserter, end -#include // map -#include // string -#include // tuple, make_tuple -#include // is_arithmetic, is_same, is_enum, underlying_type, is_convertible -#include // unordered_map -#include // pair, declval -#include // valarray - -// #include - - -#include // exception -#include // runtime_error -#include // to_string - -// #include - - -#include // size_t - -namespace nlohmann -{ -namespace detail -{ -/// struct to capture the start position of the current token -struct position_t -{ - /// the total number of characters read - std::size_t chars_read_total = 0; - /// the number of characters read in the current line - std::size_t chars_read_current_line = 0; - /// the number of lines read - std::size_t lines_read = 0; - - /// conversion to size_t to preserve SAX interface - constexpr operator size_t() const - { - return chars_read_total; - } -}; - -} // namespace detail -} // namespace nlohmann - - -namespace nlohmann -{ -namespace detail -{ -//////////////// -// exceptions // -//////////////// - -/*! -@brief general exception of the @ref basic_json class - -This class is an extension of `std::exception` objects with a member @a id for -exception ids. It is used as the base class for all exceptions thrown by the -@ref basic_json class. This class can hence be used as "wildcard" to catch -exceptions. - -Subclasses: -- @ref parse_error for exceptions indicating a parse error -- @ref invalid_iterator for exceptions indicating errors with iterators -- @ref type_error for exceptions indicating executing a member function with - a wrong type -- @ref out_of_range for exceptions indicating access out of the defined range -- @ref other_error for exceptions indicating other library errors - -@internal -@note To have nothrow-copy-constructible exceptions, we internally use - `std::runtime_error` which can cope with arbitrary-length error messages. - Intermediate strings are built with static functions and then passed to - the actual constructor. -@endinternal - -@liveexample{The following code shows how arbitrary library exceptions can be -caught.,exception} - -@since version 3.0.0 -*/ -class exception : public std::exception -{ - public: - /// returns the explanatory string - const char* what() const noexcept override - { - return m.what(); - } - - /// the id of the exception - const int id; - - protected: - exception(int id_, const char* what_arg) : id(id_), m(what_arg) {} - - static std::string name(const std::string& ename, int id_) - { - return "[json.exception." + ename + "." + std::to_string(id_) + "] "; - } - - private: - /// an exception object as storage for error messages - std::runtime_error m; -}; - -/*! -@brief exception indicating a parse error - -This exception is thrown by the library when a parse error occurs. 
Parse errors -can occur during the deserialization of JSON text, CBOR, MessagePack, as well -as when using JSON Patch. - -Member @a byte holds the byte index of the last read character in the input -file. - -Exceptions have ids 1xx. - -name / id | example message | description ------------------------------- | --------------- | ------------------------- -json.exception.parse_error.101 | parse error at 2: unexpected end of input; expected string literal | This error indicates a syntax error while deserializing a JSON text. The error message describes that an unexpected token (character) was encountered, and the member @a byte indicates the error position. -json.exception.parse_error.102 | parse error at 14: missing or wrong low surrogate | JSON uses the `\uxxxx` format to describe Unicode characters. Code points above above 0xFFFF are split into two `\uxxxx` entries ("surrogate pairs"). This error indicates that the surrogate pair is incomplete or contains an invalid code point. -json.exception.parse_error.103 | parse error: code points above 0x10FFFF are invalid | Unicode supports code points up to 0x10FFFF. Code points above 0x10FFFF are invalid. -json.exception.parse_error.104 | parse error: JSON patch must be an array of objects | [RFC 6902](https://tools.ietf.org/html/rfc6902) requires a JSON Patch document to be a JSON document that represents an array of objects. -json.exception.parse_error.105 | parse error: operation must have string member 'op' | An operation of a JSON Patch document must contain exactly one "op" member, whose value indicates the operation to perform. Its value must be one of "add", "remove", "replace", "move", "copy", or "test"; other values are errors. -json.exception.parse_error.106 | parse error: array index '01' must not begin with '0' | An array index in a JSON Pointer ([RFC 6901](https://tools.ietf.org/html/rfc6901)) may be `0` or any number without a leading `0`. -json.exception.parse_error.107 | parse error: JSON pointer must be empty or begin with '/' - was: 'foo' | A JSON Pointer must be a Unicode string containing a sequence of zero or more reference tokens, each prefixed by a `/` character. -json.exception.parse_error.108 | parse error: escape character '~' must be followed with '0' or '1' | In a JSON Pointer, only `~0` and `~1` are valid escape sequences. -json.exception.parse_error.109 | parse error: array index 'one' is not a number | A JSON Pointer array index must be a number. -json.exception.parse_error.110 | parse error at 1: cannot read 2 bytes from vector | When parsing CBOR or MessagePack, the byte vector ends before the complete value has been read. -json.exception.parse_error.112 | parse error at 1: error reading CBOR; last byte: 0xF8 | Not all types of CBOR or MessagePack are supported. This exception occurs if an unsupported byte was read. -json.exception.parse_error.113 | parse error at 2: expected a CBOR string; last byte: 0x98 | While parsing a map key, a value that is not a string has been read. -json.exception.parse_error.114 | parse error: Unsupported BSON record type 0x0F | The parsing of the corresponding BSON record type is not implemented (yet). - -@note For an input with n bytes, 1 is the index of the first character and n+1 - is the index of the terminating null byte or the end of file. This also - holds true when reading a byte vector (CBOR or MessagePack). 
- -@liveexample{The following code shows how a `parse_error` exception can be -caught.,parse_error} - -@sa - @ref exception for the base class of the library exceptions -@sa - @ref invalid_iterator for exceptions indicating errors with iterators -@sa - @ref type_error for exceptions indicating executing a member function with - a wrong type -@sa - @ref out_of_range for exceptions indicating access out of the defined range -@sa - @ref other_error for exceptions indicating other library errors - -@since version 3.0.0 -*/ -class parse_error : public exception -{ - public: - /*! - @brief create a parse error exception - @param[in] id_ the id of the exception - @param[in] pos the position where the error occurred (or with - chars_read_total=0 if the position cannot be - determined) - @param[in] what_arg the explanatory string - @return parse_error object - */ - static parse_error create(int id_, const position_t& pos, const std::string& what_arg) - { - std::string w = exception::name("parse_error", id_) + "parse error" + - position_string(pos) + ": " + what_arg; - return parse_error(id_, pos.chars_read_total, w.c_str()); - } - - static parse_error create(int id_, std::size_t byte_, const std::string& what_arg) - { - std::string w = exception::name("parse_error", id_) + "parse error" + - (byte_ != 0 ? (" at byte " + std::to_string(byte_)) : "") + - ": " + what_arg; - return parse_error(id_, byte_, w.c_str()); - } - - /*! - @brief byte index of the parse error - - The byte index of the last read character in the input file. - - @note For an input with n bytes, 1 is the index of the first character and - n+1 is the index of the terminating null byte or the end of file. - This also holds true when reading a byte vector (CBOR or MessagePack). - */ - const std::size_t byte; - - private: - parse_error(int id_, std::size_t byte_, const char* what_arg) - : exception(id_, what_arg), byte(byte_) {} - - static std::string position_string(const position_t& pos) - { - return " at line " + std::to_string(pos.lines_read + 1) + - ", column " + std::to_string(pos.chars_read_current_line); - } -}; - -/*! -@brief exception indicating errors with iterators - -This exception is thrown if iterators passed to a library function do not match -the expected semantics. - -Exceptions have ids 2xx. - -name / id | example message | description ------------------------------------ | --------------- | ------------------------- -json.exception.invalid_iterator.201 | iterators are not compatible | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid. -json.exception.invalid_iterator.202 | iterator does not fit current value | In an erase or insert function, the passed iterator @a pos does not belong to the JSON value for which the function was called. It hence does not define a valid position for the deletion/insertion. -json.exception.invalid_iterator.203 | iterators do not fit current value | Either iterator passed to function @ref erase(IteratorType first, IteratorType last) does not belong to the JSON value from which values shall be erased. It hence does not define a valid range to delete values from. 
-json.exception.invalid_iterator.204 | iterators out of range | When an iterator range for a primitive type (number, boolean, or string) is passed to a constructor or an erase function, this range has to be exactly (@ref begin(), @ref end()), because this is the only way the single stored value is expressed. All other ranges are invalid. -json.exception.invalid_iterator.205 | iterator out of range | When an iterator for a primitive type (number, boolean, or string) is passed to an erase function, the iterator has to be the @ref begin() iterator, because it is the only way to address the stored value. All other iterators are invalid. -json.exception.invalid_iterator.206 | cannot construct with iterators from null | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) belong to a JSON null value and hence to not define a valid range. -json.exception.invalid_iterator.207 | cannot use key() for non-object iterators | The key() member function can only be used on iterators belonging to a JSON object, because other types do not have a concept of a key. -json.exception.invalid_iterator.208 | cannot use operator[] for object iterators | The operator[] to specify a concrete offset cannot be used on iterators belonging to a JSON object, because JSON objects are unordered. -json.exception.invalid_iterator.209 | cannot use offsets with object iterators | The offset operators (+, -, +=, -=) cannot be used on iterators belonging to a JSON object, because JSON objects are unordered. -json.exception.invalid_iterator.210 | iterators do not fit | The iterator range passed to the insert function are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid. -json.exception.invalid_iterator.211 | passed iterators may not belong to container | The iterator range passed to the insert function must not be a subrange of the container to insert to. -json.exception.invalid_iterator.212 | cannot compare iterators of different containers | When two iterators are compared, they must belong to the same container. -json.exception.invalid_iterator.213 | cannot compare order of object iterators | The order of object iterators cannot be compared, because JSON objects are unordered. -json.exception.invalid_iterator.214 | cannot get value | Cannot get value for iterator: Either the iterator belongs to a null value or it is an iterator to a primitive type (number, boolean, or string), but the iterator is different to @ref begin(). - -@liveexample{The following code shows how an `invalid_iterator` exception can be -caught.,invalid_iterator} - -@sa - @ref exception for the base class of the library exceptions -@sa - @ref parse_error for exceptions indicating a parse error -@sa - @ref type_error for exceptions indicating executing a member function with - a wrong type -@sa - @ref out_of_range for exceptions indicating access out of the defined range -@sa - @ref other_error for exceptions indicating other library errors - -@since version 3.0.0 -*/ -class invalid_iterator : public exception -{ - public: - static invalid_iterator create(int id_, const std::string& what_arg) - { - std::string w = exception::name("invalid_iterator", id_) + what_arg; - return invalid_iterator(id_, w.c_str()); - } - - private: - invalid_iterator(int id_, const char* what_arg) - : exception(id_, what_arg) {} -}; - -/*! 
-@brief exception indicating executing a member function with a wrong type - -This exception is thrown in case of a type error; that is, a library function is -executed on a JSON value whose type does not match the expected semantics. - -Exceptions have ids 3xx. - -name / id | example message | description ------------------------------ | --------------- | ------------------------- -json.exception.type_error.301 | cannot create object from initializer list | To create an object from an initializer list, the initializer list must consist only of a list of pairs whose first element is a string. When this constraint is violated, an array is created instead. -json.exception.type_error.302 | type must be object, but is array | During implicit or explicit value conversion, the JSON type must be compatible to the target type. For instance, a JSON string can only be converted into string types, but not into numbers or boolean types. -json.exception.type_error.303 | incompatible ReferenceType for get_ref, actual type is object | To retrieve a reference to a value stored in a @ref basic_json object with @ref get_ref, the type of the reference must match the value type. For instance, for a JSON array, the @a ReferenceType must be @ref array_t &. -json.exception.type_error.304 | cannot use at() with string | The @ref at() member functions can only be executed for certain JSON types. -json.exception.type_error.305 | cannot use operator[] with string | The @ref operator[] member functions can only be executed for certain JSON types. -json.exception.type_error.306 | cannot use value() with string | The @ref value() member functions can only be executed for certain JSON types. -json.exception.type_error.307 | cannot use erase() with string | The @ref erase() member functions can only be executed for certain JSON types. -json.exception.type_error.308 | cannot use push_back() with string | The @ref push_back() and @ref operator+= member functions can only be executed for certain JSON types. -json.exception.type_error.309 | cannot use insert() with | The @ref insert() member functions can only be executed for certain JSON types. -json.exception.type_error.310 | cannot use swap() with number | The @ref swap() member functions can only be executed for certain JSON types. -json.exception.type_error.311 | cannot use emplace_back() with string | The @ref emplace_back() member function can only be executed for certain JSON types. -json.exception.type_error.312 | cannot use update() with string | The @ref update() member functions can only be executed for certain JSON types. -json.exception.type_error.313 | invalid value to unflatten | The @ref unflatten function converts an object whose keys are JSON Pointers back into an arbitrary nested JSON value. The JSON Pointers must not overlap, because then the resulting value would not be well defined. -json.exception.type_error.314 | only objects can be unflattened | The @ref unflatten function only works for an object whose keys are JSON Pointers. -json.exception.type_error.315 | values in object must be primitive | The @ref unflatten function only works for an object whose keys are JSON Pointers and whose values are primitive. -json.exception.type_error.316 | invalid UTF-8 byte at index 10: 0x7E | The @ref dump function only works with UTF-8 encoded strings; that is, if you assign a `std::string` to a JSON value, make sure it is UTF-8 encoded. 
| -json.exception.type_error.317 | JSON value cannot be serialized to requested format | The dynamic type of the object cannot be represented in the requested serialization format (e.g. a raw `true` or `null` JSON object cannot be serialized to BSON) | - -@liveexample{The following code shows how a `type_error` exception can be -caught.,type_error} - -@sa - @ref exception for the base class of the library exceptions -@sa - @ref parse_error for exceptions indicating a parse error -@sa - @ref invalid_iterator for exceptions indicating errors with iterators -@sa - @ref out_of_range for exceptions indicating access out of the defined range -@sa - @ref other_error for exceptions indicating other library errors - -@since version 3.0.0 -*/ -class type_error : public exception -{ - public: - static type_error create(int id_, const std::string& what_arg) - { - std::string w = exception::name("type_error", id_) + what_arg; - return type_error(id_, w.c_str()); - } - - private: - type_error(int id_, const char* what_arg) : exception(id_, what_arg) {} -}; - -/*! -@brief exception indicating access out of the defined range - -This exception is thrown in case a library function is called on an input -parameter that exceeds the expected range, for instance in case of array -indices or nonexisting object keys. - -Exceptions have ids 4xx. - -name / id | example message | description -------------------------------- | --------------- | ------------------------- -json.exception.out_of_range.401 | array index 3 is out of range | The provided array index @a i is larger than @a size-1. -json.exception.out_of_range.402 | array index '-' (3) is out of range | The special array index `-` in a JSON Pointer never describes a valid element of the array, but the index past the end. That is, it can only be used to add elements at this position, but not to read it. -json.exception.out_of_range.403 | key 'foo' not found | The provided key was not found in the JSON object. -json.exception.out_of_range.404 | unresolved reference token 'foo' | A reference token in a JSON Pointer could not be resolved. -json.exception.out_of_range.405 | JSON pointer has no parent | The JSON Patch operations 'remove' and 'add' can not be applied to the root element of the JSON value. -json.exception.out_of_range.406 | number overflow parsing '10E1000' | A parsed number could not be stored as without changing it to NaN or INF. -json.exception.out_of_range.407 | number overflow serializing '9223372036854775808' | UBJSON and BSON only support integer numbers up to 9223372036854775807. | -json.exception.out_of_range.408 | excessive array size: 8658170730974374167 | The size (following `#`) of an UBJSON array or object exceeds the maximal capacity. 
| -json.exception.out_of_range.409 | BSON key cannot contain code point U+0000 (at byte 2) | Key identifiers to be serialized to BSON cannot contain code point U+0000, since the key is stored as zero-terminated c-string | - -@liveexample{The following code shows how an `out_of_range` exception can be -caught.,out_of_range} - -@sa - @ref exception for the base class of the library exceptions -@sa - @ref parse_error for exceptions indicating a parse error -@sa - @ref invalid_iterator for exceptions indicating errors with iterators -@sa - @ref type_error for exceptions indicating executing a member function with - a wrong type -@sa - @ref other_error for exceptions indicating other library errors - -@since version 3.0.0 -*/ -class out_of_range : public exception -{ - public: - static out_of_range create(int id_, const std::string& what_arg) - { - std::string w = exception::name("out_of_range", id_) + what_arg; - return out_of_range(id_, w.c_str()); - } - - private: - out_of_range(int id_, const char* what_arg) : exception(id_, what_arg) {} -}; - -/*! -@brief exception indicating other library errors - -This exception is thrown in case of errors that cannot be classified with the -other exception types. - -Exceptions have ids 5xx. - -name / id | example message | description ------------------------------- | --------------- | ------------------------- -json.exception.other_error.501 | unsuccessful: {"op":"test","path":"/baz", "value":"bar"} | A JSON Patch operation 'test' failed. The unsuccessful operation is also printed. - -@sa - @ref exception for the base class of the library exceptions -@sa - @ref parse_error for exceptions indicating a parse error -@sa - @ref invalid_iterator for exceptions indicating errors with iterators -@sa - @ref type_error for exceptions indicating executing a member function with - a wrong type -@sa - @ref out_of_range for exceptions indicating access out of the defined range - -@liveexample{The following code shows how an `other_error` exception can be -caught.,other_error} - -@since version 3.0.0 -*/ -class other_error : public exception -{ - public: - static other_error create(int id_, const std::string& what_arg) - { - std::string w = exception::name("other_error", id_) + what_arg; - return other_error(id_, w.c_str()); - } - - private: - other_error(int id_, const char* what_arg) : exception(id_, what_arg) {} -}; -} // namespace detail -} // namespace nlohmann - -// #include - - -#include // pair - -// This file contains all internal macro definitions -// You MUST include macro_unscope.hpp at the end of json.hpp to undef all of them - -// exclude unsupported compilers -#if !defined(JSON_SKIP_UNSUPPORTED_COMPILER_CHECK) - #if defined(__clang__) - #if (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) < 30400 - #error "unsupported Clang version - see https://github.com/nlohmann/json#supported-compilers" - #endif - #elif defined(__GNUC__) && !(defined(__ICC) || defined(__INTEL_COMPILER)) - #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40800 - #error "unsupported GCC version - see https://github.com/nlohmann/json#supported-compilers" - #endif - #endif -#endif - -// disable float-equal warnings on GCC/clang -#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wfloat-equal" -#endif - -// disable documentation warnings on clang -#if defined(__clang__) - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wdocumentation" -#endif - -// 
allow for portable deprecation warnings -#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) - #define JSON_DEPRECATED __attribute__((deprecated)) -#elif defined(_MSC_VER) - #define JSON_DEPRECATED __declspec(deprecated) -#else - #define JSON_DEPRECATED -#endif - -// allow for portable nodiscard warnings -#if defined(__has_cpp_attribute) - #if __has_cpp_attribute(nodiscard) - #define JSON_NODISCARD [[nodiscard]] - #elif __has_cpp_attribute(gnu::warn_unused_result) - #define JSON_NODISCARD [[gnu::warn_unused_result]] - #else - #define JSON_NODISCARD - #endif -#else - #define JSON_NODISCARD -#endif - -// allow to disable exceptions -#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)) && !defined(JSON_NOEXCEPTION) - #define JSON_THROW(exception) throw exception - #define JSON_TRY try - #define JSON_CATCH(exception) catch(exception) - #define JSON_INTERNAL_CATCH(exception) catch(exception) -#else - #include - #define JSON_THROW(exception) std::abort() - #define JSON_TRY if(true) - #define JSON_CATCH(exception) if(false) - #define JSON_INTERNAL_CATCH(exception) if(false) -#endif - -// override exception macros -#if defined(JSON_THROW_USER) - #undef JSON_THROW - #define JSON_THROW JSON_THROW_USER -#endif -#if defined(JSON_TRY_USER) - #undef JSON_TRY - #define JSON_TRY JSON_TRY_USER -#endif -#if defined(JSON_CATCH_USER) - #undef JSON_CATCH - #define JSON_CATCH JSON_CATCH_USER - #undef JSON_INTERNAL_CATCH - #define JSON_INTERNAL_CATCH JSON_CATCH_USER -#endif -#if defined(JSON_INTERNAL_CATCH_USER) - #undef JSON_INTERNAL_CATCH - #define JSON_INTERNAL_CATCH JSON_INTERNAL_CATCH_USER -#endif - -// manual branch prediction -#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) - #define JSON_LIKELY(x) __builtin_expect(x, 1) - #define JSON_UNLIKELY(x) __builtin_expect(x, 0) -#else - #define JSON_LIKELY(x) x - #define JSON_UNLIKELY(x) x -#endif - -// C++ language standard detection -#if (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464 - #define JSON_HAS_CPP_17 - #define JSON_HAS_CPP_14 -#elif (defined(__cplusplus) && __cplusplus >= 201402L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1) - #define JSON_HAS_CPP_14 -#endif - -/*! -@brief macro to briefly define a mapping between an enum and JSON -@def NLOHMANN_JSON_SERIALIZE_ENUM -@since version 3.4.0 -*/ -#define NLOHMANN_JSON_SERIALIZE_ENUM(ENUM_TYPE, ...) \ - template \ - inline void to_json(BasicJsonType& j, const ENUM_TYPE& e) \ - { \ - static_assert(std::is_enum::value, #ENUM_TYPE " must be an enum!"); \ - static const std::pair m[] = __VA_ARGS__; \ - auto it = std::find_if(std::begin(m), std::end(m), \ - [e](const std::pair& ej_pair) -> bool \ - { \ - return ej_pair.first == e; \ - }); \ - j = ((it != std::end(m)) ? it : std::begin(m))->second; \ - } \ - template \ - inline void from_json(const BasicJsonType& j, ENUM_TYPE& e) \ - { \ - static_assert(std::is_enum::value, #ENUM_TYPE " must be an enum!"); \ - static const std::pair m[] = __VA_ARGS__; \ - auto it = std::find_if(std::begin(m), std::end(m), \ - [j](const std::pair& ej_pair) -> bool \ - { \ - return ej_pair.second == j; \ - }); \ - e = ((it != std::end(m)) ? it : std::begin(m))->first; \ - } - -// Ugly macros to avoid uglier copy-paste when specializing basic_json. They -// may be removed in the future once the class is split. 
- -#define NLOHMANN_BASIC_JSON_TPL_DECLARATION \ - template class ObjectType, \ - template class ArrayType, \ - class StringType, class BooleanType, class NumberIntegerType, \ - class NumberUnsignedType, class NumberFloatType, \ - template class AllocatorType, \ - template class JSONSerializer> - -#define NLOHMANN_BASIC_JSON_TPL \ - basic_json - -// #include - - -#include // not -#include // size_t -#include // conditional, enable_if, false_type, integral_constant, is_constructible, is_integral, is_same, remove_cv, remove_reference, true_type - -namespace nlohmann -{ -namespace detail -{ -// alias templates to reduce boilerplate -template -using enable_if_t = typename std::enable_if::type; - -template -using uncvref_t = typename std::remove_cv::type>::type; - -// implementation of C++14 index_sequence and affiliates -// source: https://stackoverflow.com/a/32223343 -template -struct index_sequence -{ - using type = index_sequence; - using value_type = std::size_t; - static constexpr std::size_t size() noexcept - { - return sizeof...(Ints); - } -}; - -template -struct merge_and_renumber; - -template -struct merge_and_renumber, index_sequence> - : index_sequence < I1..., (sizeof...(I1) + I2)... > {}; - -template -struct make_index_sequence - : merge_and_renumber < typename make_index_sequence < N / 2 >::type, - typename make_index_sequence < N - N / 2 >::type > {}; - -template<> struct make_index_sequence<0> : index_sequence<> {}; -template<> struct make_index_sequence<1> : index_sequence<0> {}; - -template -using index_sequence_for = make_index_sequence; - -// dispatch utility (taken from ranges-v3) -template struct priority_tag : priority_tag < N - 1 > {}; -template<> struct priority_tag<0> {}; - -// taken from ranges-v3 -template -struct static_const -{ - static constexpr T value{}; -}; - -template -constexpr T static_const::value; -} // namespace detail -} // namespace nlohmann - -// #include - - -#include // not -#include // numeric_limits -#include // false_type, is_constructible, is_integral, is_same, true_type -#include // declval - -// #include - - -#include // random_access_iterator_tag - -// #include - - -namespace nlohmann -{ -namespace detail -{ -template struct make_void -{ - using type = void; -}; -template using void_t = typename make_void::type; -} // namespace detail -} // namespace nlohmann - -// #include - - -namespace nlohmann -{ -namespace detail -{ -template -struct iterator_types {}; - -template -struct iterator_types < - It, - void_t> -{ - using difference_type = typename It::difference_type; - using value_type = typename It::value_type; - using pointer = typename It::pointer; - using reference = typename It::reference; - using iterator_category = typename It::iterator_category; -}; - -// This is required as some compilers implement std::iterator_traits in a way that -// doesn't work with SFINAE. See https://github.com/nlohmann/json/issues/1341. 
-template -struct iterator_traits -{ -}; - -template -struct iterator_traits < T, enable_if_t < !std::is_pointer::value >> - : iterator_types -{ -}; - -template -struct iterator_traits::value>> -{ - using iterator_category = std::random_access_iterator_tag; - using value_type = T; - using difference_type = ptrdiff_t; - using pointer = T*; - using reference = T&; -}; -} // namespace detail -} // namespace nlohmann - -// #include - -// #include - -// #include - - -#include - -// #include - - -// http://en.cppreference.com/w/cpp/experimental/is_detected -namespace nlohmann -{ -namespace detail -{ -struct nonesuch -{ - nonesuch() = delete; - ~nonesuch() = delete; - nonesuch(nonesuch const&) = delete; - nonesuch(nonesuch const&&) = delete; - void operator=(nonesuch const&) = delete; - void operator=(nonesuch&&) = delete; -}; - -template class Op, - class... Args> -struct detector -{ - using value_t = std::false_type; - using type = Default; -}; - -template class Op, class... Args> -struct detector>, Op, Args...> -{ - using value_t = std::true_type; - using type = Op; -}; - -template