From eca8bce08112a7e0621baad29a1be2fcd0c668b9 Mon Sep 17 00:00:00 2001 From: FireFly Date: Wed, 15 May 2024 15:24:03 -0700 Subject: [PATCH 01/24] lix-doc: don't chomp bold headings off There are a few places in nixpkgs lib where `**Foo**:` is used as a heading instead of the usual markdown `# Foo` ones. I think this is intentional with how it gets rendered in the manual, e.g. [`lib.lists.sortOn`][1]. [1]: https://nixos.org/manual/nixpkgs/stable/#function-library-lib.lists.sortOn `nix-doc` prints this as ``` *Laws**: ```nix sortOn f == sort (p: q: f p < f q) ``` ``` chomping off the first asterisk as part of `cleanup_single_line` that's meant to deal with `/** \n * \n * \n */` style doc comments. This also means the usage in lix ends up funny-looking with a trailing asterisk as if there's a footnote to pay attention to (which is how I first noticed it, heh) The fix: When cleaning up a single line and removing a prefix comment character, ensure it's followed by whitespace (or the last character of the line). Upstream-PR: https://github.com/lf-/nix-doc/pull/26 Change-Id: If2870c53a632f6bbbcca98a4bfbd72f5bef37879 --- lix-doc/src/lib.rs | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/lix-doc/src/lib.rs b/lix-doc/src/lib.rs index 9c2e43f2f..27fe5c9b5 100644 --- a/lix-doc/src/lib.rs +++ b/lix-doc/src/lib.rs @@ -84,9 +84,13 @@ fn indented(s: &str, indent: usize) -> String { /// Cleans up a single line, erasing prefix single line comments but preserving indentation fn cleanup_single_line<'a>(s: &'a str) -> &'a str { let mut cmt_new_start = 0; - for (idx, ch) in s.char_indices() { + let mut iter = s.char_indices().peekable(); + while let Some((idx, ch)) = iter.next() { + // peek at the next character, with an explicit '\n' as "next character" at end of line + let (_, next_ch) = iter.peek().unwrap_or(&(0, '\n')); + // if we find a character, save the byte position after it as our new string start - if ch == '#' || ch == '*' { + if ch == '#' || (ch == '*' && next_ch.is_whitespace()) { cmt_new_start = idx + 1; break; } @@ -206,7 +210,7 @@ fn visit_lambda(name: String, lambda: &Lambda) -> SearchResult { SearchResult { identifier: name, doc: comment, - param_block + param_block, } } @@ -246,7 +250,7 @@ pub extern "C" fn nd_get_function_docs( filename: *const c_char, line: usize, col: usize, - ) -> *const c_char { +) -> *const c_char { let fname = unsafe { CStr::from_ptr(filename) }; fname .to_str() @@ -257,9 +261,9 @@ pub extern "C" fn nd_get_function_docs( eprintln!("panic!! 
{:#?}", e); e }) - .ok() + .ok() }) - .flatten() + .flatten() .and_then(|s| CString::new(s).ok()) .map(|s| s.into_raw() as *const c_char) .unwrap_or(ptr::null()) @@ -319,8 +323,16 @@ mod tests { let ex1 = " * a"; let ex2 = " # a"; let ex3 = " a"; + let ex4 = " *"; assert_eq!(cleanup_single_line(ex1), " a"); assert_eq!(cleanup_single_line(ex2), " a"); assert_eq!(cleanup_single_line(ex3), ex3); + assert_eq!(cleanup_single_line(ex4), ""); + } + + #[test] + fn test_single_line_retains_bold_headings() { + let ex1 = " **Foo**:"; + assert_eq!(cleanup_single_line(ex1), ex1); } } From c6bb377c91f40ae571d0a0fc951b736cb8fc2ead Mon Sep 17 00:00:00 2001 From: Puck Meerburg Date: Wed, 15 May 2024 23:22:18 +0000 Subject: [PATCH 02/24] Loosen constness on listElems() result Change-Id: I1caff000362c83e5172413a036c22a2e9ed3ede8 --- src/libexpr/value.hh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh index 450216ec0..17a85f1de 100644 --- a/src/libexpr/value.hh +++ b/src/libexpr/value.hh @@ -384,7 +384,7 @@ public: return internalType == tList1 || internalType == tList2 ? smallList : bigList.elems; } - const Value * const * listElems() const + Value * const * listElems() const { return internalType == tList1 || internalType == tList2 ? smallList : bigList.elems; } From 194654c96f61acc6dc47dd3126ad47618d45a0c8 Mon Sep 17 00:00:00 2001 From: Yorick van Pelt Date: Mon, 28 Aug 2023 18:20:23 +0200 Subject: [PATCH 03/24] primops: change to std::function, allowing the passing of user data (cherry picked from commit 48aa57549d514432d6621c1e29f051951eca2d7f) Change-Id: Ib7d5c6514031ceb6c42ac44588be6b0c1c3c225b --- src/libexpr/eval.hh | 3 ++- src/libexpr/primops.cc | 7 +++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index 2291d618c..411364d9f 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -17,6 +17,7 @@ #include #include #include +#include namespace nix { @@ -71,7 +72,7 @@ struct PrimOp /** * Implementation of the primop. */ - PrimOpFun fun; + std::function::type> fun; /** * Optional experimental for this to be gated on. diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 33a2688f1..51a257e87 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -3329,8 +3329,11 @@ static void prim_sort(EvalState & state, const PosIdx pos, Value * * args, Value callFunction. */ /* TODO: (layus) this is absurd. An optimisation like this should be outside the lambda creation */ - if (args[0]->isPrimOp() && args[0]->primOp->fun == prim_lessThan) - return CompareValues(state, noPos, "while evaluating the ordering function passed to builtins.sort")(a, b); + if (args[0]->isPrimOp()) { + auto ptr = args[0]->primOp->fun.target(); + if (ptr && *ptr == prim_lessThan) + return CompareValues(state, noPos, "while evaluating the ordering function passed to builtins.sort")(a, b); + } Value * vs[] = {a, b}; Value vBool; From 5a1824ebe1fcdeff86b57a13a33d6428e89e4bce Mon Sep 17 00:00:00 2001 From: Pierre Bourdon Date: Fri, 17 May 2024 02:02:48 +0200 Subject: [PATCH 04/24] derived-path: refuse built derived path with a non-derivation base Example: /nix/store/dr53sp25hyfsnzjpm8mh3r3y36vrw3ng-neovim-0.9.5^out This is nonsensical since selecting outputs can only be done for a buildable derivation, not for a realised store path. The build worker side of things ends up crashing with an assertion when trying to handle such malformed paths. 
Change-Id: Ia3587c71fe3da5bea45d4e506e1be4dd62291ddf --- src/libstore/derived-path.cc | 46 +++++++++++++---------------- tests/unit/libstore/derived-path.cc | 9 ++++++ 2 files changed, 29 insertions(+), 26 deletions(-) diff --git a/src/libstore/derived-path.cc b/src/libstore/derived-path.cc index 214caab54..22977c7b1 100644 --- a/src/libstore/derived-path.cc +++ b/src/libstore/derived-path.cc @@ -185,21 +185,32 @@ DerivedPath::Built DerivedPath::Built::parse( }; } -static SingleDerivedPath parseWithSingle( +template +static DerivedPathT parseDerivedPath( const Store & store, std::string_view s, std::string_view separator, const ExperimentalFeatureSettings & xpSettings) { size_t n = s.rfind(separator); - return n == s.npos - ? (SingleDerivedPath) SingleDerivedPath::Opaque::parse(store, s) - : (SingleDerivedPath) SingleDerivedPath::Built::parse(store, - make_ref(parseWithSingle( + if (n == s.npos) { + return DerivedPathT::Opaque::parse(store, s); + } else { + auto path = DerivedPathT::Built::parse(store, + make_ref(parseDerivedPath( store, s.substr(0, n), separator, xpSettings)), s.substr(n + 1), xpSettings); + + const auto& basePath = path.getBaseStorePath(); + if (!basePath.isDerivation()) { + throw InvalidPath("cannot use output selection ('%s') on non-derivation store path '%s'", + separator, basePath.to_string()); + } + + return path; + } } SingleDerivedPath SingleDerivedPath::parse( @@ -207,7 +218,7 @@ SingleDerivedPath SingleDerivedPath::parse( std::string_view s, const ExperimentalFeatureSettings & xpSettings) { - return parseWithSingle(store, s, "^", xpSettings); + return parseDerivedPath(store, s, "^", xpSettings); } SingleDerivedPath SingleDerivedPath::parseLegacy( @@ -215,24 +226,7 @@ SingleDerivedPath SingleDerivedPath::parseLegacy( std::string_view s, const ExperimentalFeatureSettings & xpSettings) { - return parseWithSingle(store, s, "!", xpSettings); -} - -static DerivedPath parseWith( - const Store & store, std::string_view s, std::string_view separator, - const ExperimentalFeatureSettings & xpSettings) -{ - size_t n = s.rfind(separator); - return n == s.npos - ? (DerivedPath) DerivedPath::Opaque::parse(store, s) - : (DerivedPath) DerivedPath::Built::parse(store, - make_ref(parseWithSingle( - store, - s.substr(0, n), - separator, - xpSettings)), - s.substr(n + 1), - xpSettings); + return parseDerivedPath(store, s, "!", xpSettings); } DerivedPath DerivedPath::parse( @@ -240,7 +234,7 @@ DerivedPath DerivedPath::parse( std::string_view s, const ExperimentalFeatureSettings & xpSettings) { - return parseWith(store, s, "^", xpSettings); + return parseDerivedPath(store, s, "^", xpSettings); } DerivedPath DerivedPath::parseLegacy( @@ -248,7 +242,7 @@ DerivedPath DerivedPath::parseLegacy( std::string_view s, const ExperimentalFeatureSettings & xpSettings) { - return parseWith(store, s, "!", xpSettings); + return parseDerivedPath(store, s, "!", xpSettings); } DerivedPath DerivedPath::fromSingle(const SingleDerivedPath & req) diff --git a/tests/unit/libstore/derived-path.cc b/tests/unit/libstore/derived-path.cc index c62d79a78..ffa541e9f 100644 --- a/tests/unit/libstore/derived-path.cc +++ b/tests/unit/libstore/derived-path.cc @@ -77,6 +77,15 @@ TEST_F(DerivedPathTest, built_built_xp) { MissingExperimentalFeature); } +/** + * Built paths with a non-derivation base should fail parsing. 
+ */ +TEST_F(DerivedPathTest, non_derivation_base) { + ASSERT_THROW( + DerivedPath::parse(*store, "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-x^foo"), + InvalidPath); +} + #ifndef COVERAGE RC_GTEST_FIXTURE_PROP( From e1119f4378e5d7fabdb7d7619927210655e31022 Mon Sep 17 00:00:00 2001 From: Jade Lovelace Date: Thu, 16 May 2024 23:43:44 -0700 Subject: [PATCH 05/24] make lix dev shells un-bear-able since we un-make them now We don't need bear anymore, since we don't have any more bad build systems that lack compile commands generation inside Lix. Change-Id: I7809ddfd993180468f846e8cd862bdd54d5b31ec --- package.nix | 2 -- 1 file changed, 2 deletions(-) diff --git a/package.nix b/package.nix index 83a4411ec..fe56a4003 100644 --- a/package.nix +++ b/package.nix @@ -375,7 +375,6 @@ stdenv.mkDerivation (finalAttrs: { just, nixfmt, glibcLocales, - bear, pre-commit-checks, clang-tools, llvmPackages, @@ -418,7 +417,6 @@ stdenv.mkDerivation (finalAttrs: { llvmPackages.clang-unwrapped.dev ] ++ lib.optional (pre-commit-checks ? enabledPackages) pre-commit-checks.enabledPackages - ++ lib.optional (stdenv.cc.isClang && !stdenv.buildPlatform.isDarwin) bear ++ lib.optional (lib.meta.availableOn stdenv.buildPlatform clangbuildanalyzer) clangbuildanalyzer ++ finalAttrs.checkInputs; From 236466faf385f98f3639ec04147c171774d03726 Mon Sep 17 00:00:00 2001 From: Qyriad Date: Fri, 17 May 2024 13:29:13 -0600 Subject: [PATCH 06/24] package: add `--print-errorlogs` to meson's tests This should have been in there originally, which is our mistake, considering that debugging CI failures is basically impossible without it. Change-Id: I4ab8799e6e0abca1984ed9801fe10c58200861a3 --- package.nix | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/package.nix b/package.nix index fe56a4003..325d3e38e 100644 --- a/package.nix +++ b/package.nix @@ -309,7 +309,10 @@ stdenv.mkDerivation (finalAttrs: { doCheck = canRunInstalled; - mesonCheckFlags = [ "--suite=check" ]; + mesonCheckFlags = [ + "--suite=check" + "--print-errorlogs" + ]; # Make sure the internal API docs are already built, because mesonInstallPhase # won't let us build them there. They would normally be built in buildPhase, @@ -342,7 +345,10 @@ stdenv.mkDerivation (finalAttrs: { doInstallCheck = finalAttrs.doCheck; - mesonInstallCheckFlags = [ "--suite=installcheck" ]; + mesonInstallCheckFlags = [ + "--suite=installcheck" + "--print-errorlogs" + ]; installCheckPhase = '' runHook preInstallCheck From 7a3745b07607d3fc85fb5a0a08832ab078080884 Mon Sep 17 00:00:00 2001 From: julia Date: Wed, 15 May 2024 19:11:32 +1000 Subject: [PATCH 07/24] Deprecate the online flake registries and vendor the default registry Fixes #183, #110, #116. The default flake-registry option becomes 'vendored', and refers to a vendored flake-registry.json file in the install path. Vendored copy of the flake-registry is from github:NixOS/flake-registry at commit 9c69f7bd2363e71fe5cd7f608113290c7614dcdd. 
Change-Id: I752b81c85ebeaab4e582ac01c239d69d65580f37 --- .../deprecate-online-flake-registry.md | 16 + misc/flake-registry/flake-registry.json | 414 ++++++++++++++++++ misc/flake-registry/meson.build | 4 + misc/meson.build | 1 + package.nix | 2 + src/libfetchers/fetch-settings.hh | 5 +- src/libfetchers/registry.cc | 10 +- tests/functional/flakes/flake-registry.sh | 72 +++ tests/functional/meson.build | 1 + tests/nixos/github-flakes.nix | 2 + 10 files changed, 525 insertions(+), 2 deletions(-) create mode 100644 doc/manual/rl-next/deprecate-online-flake-registry.md create mode 100644 misc/flake-registry/flake-registry.json create mode 100644 misc/flake-registry/meson.build create mode 100644 tests/functional/flakes/flake-registry.sh diff --git a/doc/manual/rl-next/deprecate-online-flake-registry.md b/doc/manual/rl-next/deprecate-online-flake-registry.md new file mode 100644 index 000000000..eb2a9e544 --- /dev/null +++ b/doc/manual/rl-next/deprecate-online-flake-registry.md @@ -0,0 +1,16 @@ +--- +synopsis: "Deprecate the online flake registries and vendor the default registry" +cls: 1127 +credits: midnightveil +issues: [fj#183, fj#110, fj#116, 8953, 9087] +category: Breaking Changes +--- + +The online flake registry [https://channels.nixos.org/flake-registry.json](https://channels.nixos.org/flake-registry.json) is not pinned in any way, +and the targets of the indirections can both update or change entirely at any +point. Furthermore, it is refetched on every use of a flake reference, even if +there is a local flake reference, and even if you are offline (which breaks). + +For now, we deprecate the (any) online flake registry, and vendor a copy of the +current online flake registry. This makes it work offline, and ensures that +it won't change in the future. 
diff --git a/misc/flake-registry/flake-registry.json b/misc/flake-registry/flake-registry.json new file mode 100644 index 000000000..d83ace92b --- /dev/null +++ b/misc/flake-registry/flake-registry.json @@ -0,0 +1,414 @@ +{ + "flakes": [ + { + "from": { + "id": "agda", + "type": "indirect" + }, + "to": { + "owner": "agda", + "repo": "agda", + "type": "github" + } + }, + { + "from": { + "id": "arion", + "type": "indirect" + }, + "to": { + "owner": "hercules-ci", + "repo": "arion", + "type": "github" + } + }, + { + "from": { + "id": "blender-bin", + "type": "indirect" + }, + "to": { + "dir": "blender", + "owner": "edolstra", + "repo": "nix-warez", + "type": "github" + } + }, + { + "from": { + "id": "bundlers", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "bundlers", + "type": "github" + } + }, + { + "from": { + "id": "cachix", + "type": "indirect" + }, + "to": { + "owner": "cachix", + "repo": "cachix", + "type": "github" + } + }, + { + "from": { + "id": "composable", + "type": "indirect" + }, + "to": { + "owner": "ComposableFi", + "repo": "composable", + "type": "github" + } + }, + { + "from": { + "id": "disko", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "disko", + "type": "github" + } + }, + { + "from": { + "id": "dreampkgs", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "dreampkgs", + "type": "github" + } + }, + { + "from": { + "id": "dwarffs", + "type": "indirect" + }, + "to": { + "owner": "edolstra", + "repo": "dwarffs", + "type": "github" + } + }, + { + "from": { + "id": "emacs-overlay", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "emacs-overlay", + "type": "github" + } + }, + { + "from": { + "id": "fenix", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "fenix", + "type": "github" + } + }, + { + "from": { + "id": "flake-parts", + "type": "indirect" + }, + "to": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + { + "from": { + "id": "flake-utils", + "type": "indirect" + }, + "to": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + { + "from": { + "id": "gemini", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "flake-gemini", + "type": "github" + } + }, + { + "from": { + "id": "helix", + "type": "indirect" + }, + "to": { + "owner": "helix-editor", + "repo": "helix", + "type": "github" + } + }, + { + "from": { + "id": "hercules-ci-agent", + "type": "indirect" + }, + "to": { + "owner": "hercules-ci", + "repo": "hercules-ci-agent", + "type": "github" + } + }, + { + "from": { + "id": "hercules-ci-effects", + "type": "indirect" + }, + "to": { + "owner": "hercules-ci", + "repo": "hercules-ci-effects", + "type": "github" + } + }, + { + "from": { + "id": "home-manager", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "home-manager", + "type": "github" + } + }, + { + "from": { + "id": "hydra", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "hydra", + "type": "github" + } + }, + { + "from": { + "id": "mach-nix", + "type": "indirect" + }, + "to": { + "owner": "DavHau", + "repo": "mach-nix", + "type": "github" + } + }, + { + "from": { + "id": "nickel", + "type": "indirect" + }, + "to": { + "owner": "tweag", + "repo": "nickel", + "type": "github" + } + }, + { + "from": { + "id": "nimble", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "flake-nimble", + "type": "github" + } + }, + { + "from": { + "id": "nix", + 
"type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "nix", + "type": "github" + } + }, + { + "from": { + "id": "nix-darwin", + "type": "indirect" + }, + "to": { + "owner": "LnL7", + "repo": "nix-darwin", + "type": "github" + } + }, + { + "from": { + "id": "nix-serve", + "type": "indirect" + }, + "to": { + "owner": "edolstra", + "repo": "nix-serve", + "type": "github" + } + }, + { + "from": { + "id": "nixops", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "nixops", + "type": "github" + } + }, + { + "from": { + "id": "nixos-hardware", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "nixos-hardware", + "type": "github" + } + }, + { + "from": { + "id": "nixos-homepage", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "nixos-homepage", + "type": "github" + } + }, + { + "from": { + "id": "nixos-search", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "nixos-search", + "type": "github" + } + }, + { + "from": { + "id": "nixpkgs", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + { + "from": { + "id": "nur", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "NUR", + "type": "github" + } + }, + { + "from": { + "id": "patchelf", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "patchelf", + "type": "github" + } + }, + { + "from": { + "id": "poetry2nix", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "poetry2nix", + "type": "github" + } + }, + { + "from": { + "id": "pridefetch", + "type": "indirect" + }, + "to": { + "owner": "SpyHoodle", + "repo": "pridefetch", + "type": "github" + } + }, + { + "from": { + "id": "sops-nix", + "type": "indirect" + }, + "to": { + "owner": "Mic92", + "repo": "sops-nix", + "type": "github" + } + }, + { + "from": { + "id": "systems", + "type": "indirect" + }, + "to": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + { + "from": { + "id": "templates", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "templates", + "type": "github" + } + } + ], + "version": 2 +} diff --git a/misc/flake-registry/meson.build b/misc/flake-registry/meson.build new file mode 100644 index 000000000..674ee8dbf --- /dev/null +++ b/misc/flake-registry/meson.build @@ -0,0 +1,4 @@ +install_data( + 'flake-registry.json', + install_dir : datadir, +) diff --git a/misc/meson.build b/misc/meson.build index a6d1f944b..a8f09722c 100644 --- a/misc/meson.build +++ b/misc/meson.build @@ -3,3 +3,4 @@ subdir('fish') subdir('zsh') subdir('systemd') +subdir('flake-registry') diff --git a/package.nix b/package.nix index 325d3e38e..be3bcfb35 100644 --- a/package.nix +++ b/package.nix @@ -313,6 +313,8 @@ stdenv.mkDerivation (finalAttrs: { "--suite=check" "--print-errorlogs" ]; + # the tests access localhost. + __darwinAllowLocalNetworking = true; # Make sure the internal API docs are already built, because mesonInstallPhase # won't let us build them there. 
They would normally be built in buildPhase, diff --git a/src/libfetchers/fetch-settings.hh b/src/libfetchers/fetch-settings.hh index 6108a179c..c67a75082 100644 --- a/src/libfetchers/fetch-settings.hh +++ b/src/libfetchers/fetch-settings.hh @@ -71,10 +71,13 @@ struct FetchSettings : public Config Setting warnDirty{this, true, "warn-dirty", "Whether to warn about dirty Git/Mercurial trees."}; - Setting flakeRegistry{this, "https://channels.nixos.org/flake-registry.json", "flake-registry", + Setting flakeRegistry{this, "vendored", "flake-registry", R"( Path or URI of the global flake registry. + URIs are deprecated. When set to 'vendored', defaults to a vendored + copy of https://channels.nixos.org/flake-registry.json. + When empty, disables the global flake registry. )", {}, true, Xp::Flakes}; diff --git a/src/libfetchers/registry.cc b/src/libfetchers/registry.cc index da92273d6..4b2d61f52 100644 --- a/src/libfetchers/registry.cc +++ b/src/libfetchers/registry.cc @@ -16,8 +16,12 @@ std::shared_ptr Registry::read( { auto registry = std::make_shared(type); - if (!pathExists(path)) + if (!pathExists(path)) { + if (type == RegistryType::Global) { + warn("cannot read flake registry '%s': path does not exist", path); + } return std::make_shared(type); + } try { @@ -155,9 +159,13 @@ static std::shared_ptr getGlobalRegistry(ref store) auto path = fetchSettings.flakeRegistry.get(); if (path == "") { return std::make_shared(Registry::Global); // empty registry + } else if (path == "vendored") { + return Registry::read(settings.nixDataDir + "/flake-registry.json", Registry::Global); } if (!path.starts_with("/")) { + warn("config option flake-registry referring to a URL is deprecated and will be removed in Lix 3.0; yours is: `%s'", path); + auto storePath = downloadFile(store, path, "flake-registry.json", false).storePath; if (auto store2 = store.dynamic_pointer_cast()) store2->addPermRoot(storePath, getCacheDir() + "/nix/flake-registry.json"); diff --git a/tests/functional/flakes/flake-registry.sh b/tests/functional/flakes/flake-registry.sh new file mode 100644 index 000000000..73ab353bf --- /dev/null +++ b/tests/functional/flakes/flake-registry.sh @@ -0,0 +1,72 @@ +source ./common.sh + +# remove the flake registry from nix.conf, to set to default ("vendored") +sed -i '/flake-registry/d' "$NIX_CONF_DIR/nix.conf" + +# Make sure the vendored registry contains the correct amount. +[[ $(nix registry list | wc -l) == 37 ]] +# sanity check, contains the important ones +nix registry list | grep '^global flake:nixpkgs' +nix registry list | grep '^global flake:home-manager' + + +# it should work the same if we set to vendored directly. +echo 'flake-registry = vendored' >> "$NIX_CONF_DIR/nix.conf" +[[ $(nix registry list | wc -l) == 37 ]] +# sanity check, contains the important ones +nix registry list | grep '^global flake:nixpkgs' +nix registry list | grep '^global flake:home-manager' + + +# the online flake registry should still work, but it is deprecated. +set -m +# port 0: auto pick a free port, unbufferred output +python3 -u -m http.server 0 --bind 127.0.0.1 > server.out & +# wait for the http server to admit it is working +while ! 
grep -qP 'port \d+' server.out ; do + echo 'waiting for python http' >&2 + sleep 0.2 +done + +port=$(awk 'match($0,/port ([[:digit:]]+)/, ary) { print ary[1] }' server.out) + +sed -i '/flake-registry/d' "$NIX_CONF_DIR/nix.conf" +echo "flake-registry = http://127.0.0.1:$port/flake-registry.json" >> "$NIX_CONF_DIR/nix.conf" +cat < flake-registry.json +{ + "flakes": [ + { + "from": { + "type": "indirect", + "id": "nixpkgs" + }, + "to": { + "type": "github", + "owner": "NixOS", + "repo": "nixpkgs" + } + }, + { + "from": { + "type": "indirect", + "id": "private-flake" + }, + "to": { + "type": "github", + "owner": "fancy-enterprise", + "repo": "private-flake" + } + } + ], + "version": 2 +} +EOF + +[[ $(nix registry list | wc -l) == 2 ]] +nix registry list | grep '^global flake:nixpkgs' +nix registry list | grep '^global flake:private-flake' + +# make sure we have a warning: +nix registry list 2>&1 | grep "config option flake-registry referring to a URL is deprecated and will be removed" + +kill %1 diff --git a/tests/functional/meson.build b/tests/functional/meson.build index 1e68cfe8c..a13dee001 100644 --- a/tests/functional/meson.build +++ b/tests/functional/meson.build @@ -69,6 +69,7 @@ functional_tests_scripts = [ 'flakes/unlocked-override.sh', 'flakes/absolute-paths.sh', 'flakes/build-paths.sh', + 'flakes/flake-registry.sh', 'flakes/flake-in-submodule.sh', 'gc.sh', 'nix-collect-garbage-d.sh', diff --git a/tests/nixos/github-flakes.nix b/tests/nixos/github-flakes.nix index 1954208b9..e3437c5e8 100644 --- a/tests/nixos/github-flakes.nix +++ b/tests/nixos/github-flakes.nix @@ -146,6 +146,8 @@ in virtualisation.additionalPaths = [ pkgs.hello pkgs.fuse ]; virtualisation.memorySize = 4096; nix.settings.substituters = lib.mkForce [ ]; + # note: URL flake-registries are currently deprecated. + nix.settings.flake-registry = "https://channels.nixos.org/flake-registry.json"; nix.extraOptions = "experimental-features = nix-command flakes"; networking.hosts.${(builtins.head nodes.github.networking.interfaces.eth1.ipv4.addresses).address} = [ "channels.nixos.org" "api.github.com" "github.com" ]; From 1fe58bd8a747f9c6d5b27c521fbf3d7b281e130d Mon Sep 17 00:00:00 2001 From: Puck Meerburg Date: Fri, 17 May 2024 20:01:50 +0000 Subject: [PATCH 08/24] nix cat/dump-path/key: stop progress bar before writeFull These commands outputs data that may not end with a newline. This causes problems when the progress bar redraws, as that completely wipes the last line of output. As nix key generate-secret outputs a single line of text with no output, it shows up entirely blank, making it look like nothing happened. 
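The failure mode can be reproduced in isolation with a few lines of terminal output. This is an illustrative sketch only: the key text is a placeholder, and the `"\r\033[K"` below is a carriage return plus erase-to-end-of-line, the same style of redraw the progress bar performs.

```cpp
// Illustrative sketch only: why a redrawing progress bar eats output that
// does not end in a newline.
#include <chrono>
#include <iostream>
#include <thread>

int main()
{
    // Command output without a trailing newline (placeholder secret key).
    std::cout << "cache.example.org-1:placeholder" << std::flush;

    // A later progress-bar style redraw wipes that partial line...
    std::this_thread::sleep_for(std::chrono::milliseconds(200));
    std::cout << "\r\033[K" << std::flush;

    // ...so the fix is to call stopProgressBar() before writing raw data.
    return 0;
}
```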
Fixes: https://git.lix.systems/lix-project/lix/issues/320 Change-Id: I5ac706d71d839b6dfa760b60a351414cd96297cf --- src/nix/cat.cc | 6 +++++- src/nix/dump-path.cc | 3 +++ src/nix/sigs.cc | 3 +++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/nix/cat.cc b/src/nix/cat.cc index 60aa66ce0..678edd9a1 100644 --- a/src/nix/cat.cc +++ b/src/nix/cat.cc @@ -2,6 +2,7 @@ #include "store-api.hh" #include "fs-accessor.hh" #include "nar-accessor.hh" +#include "progress-bar.hh" using namespace nix; @@ -17,7 +18,10 @@ struct MixCat : virtual Args if (st.type != FSAccessor::Type::tRegular) throw Error("path '%1%' is not a regular file", path); - writeFull(STDOUT_FILENO, accessor->readFile(path)); + auto file = accessor->readFile(path); + + stopProgressBar(); + writeFull(STDOUT_FILENO, file); } }; diff --git a/src/nix/dump-path.cc b/src/nix/dump-path.cc index c4edc894b..fb32dddb7 100644 --- a/src/nix/dump-path.cc +++ b/src/nix/dump-path.cc @@ -1,6 +1,7 @@ #include "command.hh" #include "store-api.hh" #include "archive.hh" +#include "progress-bar.hh" using namespace nix; @@ -20,6 +21,7 @@ struct CmdDumpPath : StorePathCommand void run(ref store, const StorePath & storePath) override { + stopProgressBar(); FdSink sink(STDOUT_FILENO); store->narFromPath(storePath, sink); sink.flush(); @@ -55,6 +57,7 @@ struct CmdDumpPath2 : Command void run() override { + stopProgressBar(); FdSink sink(STDOUT_FILENO); dumpPath(path, sink); sink.flush(); diff --git a/src/nix/sigs.cc b/src/nix/sigs.cc index 730aa6532..eeb14e29a 100644 --- a/src/nix/sigs.cc +++ b/src/nix/sigs.cc @@ -3,6 +3,7 @@ #include "store-api.hh" #include "thread-pool.hh" #include "signals.hh" +#include "progress-bar.hh" #include @@ -220,6 +221,8 @@ struct CmdKey : NixMultiCommand { if (!command) throw UsageError("'nix key' requires a sub-command."); + + stopProgressBar(); command->second->run(); } }; From 139d31f87658c420622f6880a4bdfee8b522f87c Mon Sep 17 00:00:00 2001 From: Alyssa Ross Date: Fri, 17 May 2024 18:18:38 +0200 Subject: [PATCH 09/24] Improve nix-store --delete failure message On several occasions I've found myself confused when trying to delete a store path, because I am told it's still alive, but nix-store --query --roots doesn't show anything. Let's save future users this confusion by mentioning that a path might be alive due to having referrers, not just roots. (cherry picked from commit 979a019014569eee7d0071605f6ff500b544f6ac) Upstream-PR: https://github.com/NixOS/nix/pull/10733 Change-Id: I54ae839a85f3de3393493fba27fd40d7d3af0516 --- src/libstore/gc.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index 535bbd251..6b37f0af3 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -695,7 +695,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) throw Error( "Cannot delete path '%1%' since it is still alive. " "To find out why, use: " - "nix-store --query --roots", + "nix-store --query --roots and nix-store --query --referrers", printStorePath(i)); } From 774c56094f3f3dcb1f25fe147c52604ad664bd5b Mon Sep 17 00:00:00 2001 From: eldritch horrors Date: Sat, 18 May 2024 17:48:27 +0200 Subject: [PATCH 10/24] libstore: fix old RemoteStore::addToStore serializer having the serializer write into `*conn` is not legal because we are in a sinkToSource that will be drained by the remote we're connected to. writing into `*conn` directly can break the framing protocol. 
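A minimal, self-contained sketch of that failure mode, using a toy length-prefixed format rather than the real daemon protocol (all names here are invented):

```cpp
// Illustrative sketch only: bytes written around a framing writer
// desynchronise the reader, because the reader expects every chunk to be
// preceded by a length header.
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

struct FramedWriter
{
    std::ostream & underlying;

    void write(const std::string & data)
    {
        uint32_t len = static_cast<uint32_t>(data.size());
        underlying.write(reinterpret_cast<const char *>(&len), sizeof len);
        underlying.write(data.data(), len);
    }
};

int main()
{
    std::ostringstream wire;
    FramedWriter framed{wire};

    framed.write("path info");   // correct: goes through the framing layer
    wire << "references";        // bug: bypasses framing; the reader will try
                                 // to interpret these bytes as a length header
    framed.write("narhash");     // correct again, but the stream is already broken

    std::cout << "bytes on the wire: " << wire.str().size() << "\n";
    return 0;
}
```

The fix below instead builds a `WorkerProto::WriteConn` over the framing `sink`, so the references serializer writes into the framed stream rather than the raw connection.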
it is unlikely this code was ever run: to protocol it caters to is from 2016(!) and thoroughly untested in-tree, and since it's been present since nix 2.17 and the 1.18 protocol broken here is nix 2.0 we might safely assume that daemons older than nix 2.1 are no longer used now see also #325 (though that wants <2.3 gone, this is sadly only <2.1) Change-Id: I9d674c18f6d802f61c5d85dfd9608587b73e70a5 --- src/libstore/remote-store.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 1cdfdb925..22d87b027 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -509,7 +509,8 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source, sink << exportMagic << printStorePath(info.path); - WorkerProto::write(*this, *conn, info.references); + WorkerProto::WriteConn nested { .to = sink, .version = conn->daemonVersion }; + WorkerProto::write(*this, nested, info.references); sink << (info.deriver ? printStorePath(*info.deriver) : "") << 0 // == no legacy signature From 93dbb698b3effe489d307dd7b50200e468545ce7 Mon Sep 17 00:00:00 2001 From: Raito Bezarius Date: Sun, 19 May 2024 16:58:41 +0200 Subject: [PATCH 11/24] chore: remove incorrect maintainers/*.md documentation Fate has something different in store for the release process, backporting process and the general maintainer documentation. See https://git.lix.systems/lix-project/lix/issues/260. Change-Id: I626686ff4059aee22a3ab1664b52581b2dbf6ed7 Signed-off-by: Raito Bezarius --- maintainers/README.md | 146 ------------------------ maintainers/backporting.md | 12 -- maintainers/release-process.md | 196 --------------------------------- 3 files changed, 354 deletions(-) delete mode 100644 maintainers/README.md delete mode 100644 maintainers/backporting.md delete mode 100644 maintainers/release-process.md diff --git a/maintainers/README.md b/maintainers/README.md deleted file mode 100644 index 0d520cb0c..000000000 --- a/maintainers/README.md +++ /dev/null @@ -1,146 +0,0 @@ -# Nix maintainers team - -## Motivation - -The team's main responsibility is to set a direction for the development of Nix and ensure that the code is in good shape. - -We aim to achieve this by improving the contributor experience and attracting more maintainers – that is, by helping other people contributing to Nix and eventually taking responsibility – in order to scale the development process to match users' needs. - -### Objectives - -- It is obvious what is worthwhile to work on. -- It is easy to find the right place in the code to make a change. -- It is clear what is expected of a pull request. -- It is predictable how to get a change merged and released. 
- -### Tasks - -- Establish, communicate, and maintain a technical roadmap -- Improve documentation targeted at contributors - - Record architecture and design decisions - - Elaborate contribution guides and abide to them - - Define and assert quality criteria for contributions -- Maintain the issue tracker and triage pull requests -- Help contributors succeed with pull requests that address roadmap milestones -- Manage the release lifecycle -- Regularly publish reports on work done -- Engage with third parties in the interest of the project -- Ensure the required maintainer capacity for all of the above - -## Members - -- Eelco Dolstra (@edolstra) – Team lead -- Théophane Hufschmitt (@thufschmitt) -- Valentin Gagarin (@fricklerhandwerk) -- Thomas Bereknyei (@tomberek) -- Robert Hensing (@roberth) -- John Ericson (@Ericson2314) - -## Meeting protocol - -The team meets twice a week: - -- Discussion meeting: [Fridays 13:00-14:00 CET](https://calendar.google.com/calendar/event?eid=MHNtOGVuNWtrZXNpZHR2bW1sM3QyN2ZjaGNfMjAyMjExMjVUMTIwMDAwWiBiOW81MmZvYnFqYWs4b3E4bGZraGczdDBxZ0Bn) - - 1. Triage issues and pull requests from the [No Status](#no-status) column (30 min) - 2. Discuss issues and pull requests from the [To discuss](#to-discuss) column (30 min) - -- Work meeting: [Mondays 13:00-15:00 CET](https://calendar.google.com/calendar/event?eid=NTM1MG1wNGJnOGpmOTZhYms3bTB1bnY5cWxfMjAyMjExMjFUMTIwMDAwWiBiOW81MmZvYnFqYWs4b3E4bGZraGczdDBxZ0Bn) - - 1. Code review on pull requests from [In review](#in-review). - 2. Other chores and tasks. - -Meeting notes are collected on a [collaborative scratchpad](https://pad.lassul.us/Cv7FpYx-Ri-4VjUykQOLAw), and published on Discourse under the [Nix category](https://discourse.nixos.org/c/dev/nix/50). - -## Project board protocol - -The team uses a [GitHub project board](https://github.com/orgs/NixOS/projects/19/views/1) for tracking its work. - -Items on the board progress through the following states: - -### No Status - -During the discussion meeting, the team triages new items. -To be considered, issues and pull requests must have a high-level description to provide the whole team with the necessary context at a glance. - -On every meeting, at least one item from each of the following categories is inspected: - -1. [critical](https://github.com/NixOS/nix/labels/critical) -2. [security](https://github.com/NixOS/nix/labels/security) -3. [regression](https://github.com/NixOS/nix/labels/regression) -4. [bug](https://github.com/NixOS/nix/issues?q=is%3Aopen+label%3Abug+sort%3Areactions-%2B1-desc) -5. [tests of existing functionality](https://github.com/NixOS/nix/issues?q=is%3Aopen+label%3Atests+-label%3Afeature+sort%3Areactions-%2B1-desc) - -- [oldest pull requests](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+sort%3Acreated-asc) -- [most popular pull requests](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+sort%3Areactions-%2B1-desc) -- [oldest issues](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Acreated-asc) -- [most popular issues](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc) - -Team members can also add pull requests or issues they would like the whole team to consider. -To ensure process quality and reliability, all non-trivial pull requests must be triaged before merging. - -If there is disagreement on the general idea behind an issue or pull request, it is moved to [To discuss](#to-discuss). 
-Otherwise, the issue or pull request in questions get the label [`idea approved`](https://github.com/NixOS/nix/labels/idea%20approved). -For issues this means that an implementation is welcome and will be prioritised for review. -For pull requests this means that: -- Unfinished work is encouraged to be continued. -- A reviewer is assigned to take responsibility for getting the pull request merged. - The item is moved to the [Assigned](#assigned) column. -- If needed, the team can decide to do a collarorative review. - Then the item is moved to the [In review](#in-review) column, and review session is scheduled. - -What constitutes a trivial pull request is up to maintainers' judgement. - -### To discuss - -Pull requests and issues that are deemed important and controversial are discussed by the team during discussion meetings. - -This may be where the merit of the change itself or the implementation strategy is contested by a team member. - -As a general guideline, the order of items is determined as follows: - -- Prioritise pull requests over issues - - Contributors who took the time to implement concrete change proposals should not wait indefinitely. - -- Prioritise fixing bugs and testing over documentation, improvements or new features - - The team values stability and accessibility higher than raw functionality. - -- Interleave issues and PRs - - This way issues without attempts at a solution get a chance to get addressed. - -### In review - -Pull requests in this column are reviewed together during work meetings. -This is both for spreading implementation knowledge and for establishing common values in code reviews. - -When the overall direction is agreed upon, even when further changes are required, the pull request is assigned to one team member. -If significant changes are requested or reviewers cannot come to a conclusion in reasonable time, the pull request is [marked as draft](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/changing-the-stage-of-a-pull-request#converting-a-pull-request-to-a-draft). - -### Assigned - -One team member is assigned to each of these pull requests. -They will communicate with the authors, and make the final approval once all remaining issues are addressed. - -If more substantive issues arise, the assignee can move the pull request back to [To discuss](#to-discuss) or [In review](#in-review) to involve the team again. - -### Flowchart - -The process is illustrated in the following diagram: - -```mermaid -flowchart TD - discuss[To discuss] - - review[To review] - - New --> |Disagreement on idea| discuss - New & discuss --> |Consensus on idea| review - - review --> |Consensus on implementation| Assigned - - Assigned --> |Implementation issues arise| review - Assigned --> |Remaining issues fixed| Merged -``` diff --git a/maintainers/backporting.md b/maintainers/backporting.md deleted file mode 100644 index 2424050c8..000000000 --- a/maintainers/backporting.md +++ /dev/null @@ -1,12 +0,0 @@ - -# Backporting - -To [automatically backport a pull request](https://github.com/NixOS/nix/blob/master/.github/workflows/backport.yml) to a release branch once it's merged, assign it a label of the form [`backport `](https://github.com/NixOS/nix/labels?q=backport). 
- -Since [GitHub Actions workflows will not trigger other workflows](https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow), checks on the automatic backport need to be triggered by another actor. -This is achieved by closing and reopening the backport pull request. - -This specifically affects the [`installer_test`] check. -Note that it only runs after the other tests, so it may take a while to appear. - -[`installer_test`]: https://github.com/NixOS/nix/blob/895dfc656a21f6252ddf48df0d1f215effa04ecb/.github/workflows/ci.yml#L70-L91 diff --git a/maintainers/release-process.md b/maintainers/release-process.md deleted file mode 100644 index f2b60d8e7..000000000 --- a/maintainers/release-process.md +++ /dev/null @@ -1,196 +0,0 @@ -# Nix release process - -## Release artifacts - -The release process is intended to create the following for each -release: - -* A Git tag - -* Binary tarballs in https://releases.nixos.org/?prefix=nix/ - -* Docker images - -* Closures in https://cache.nixos.org - -* (Optionally) Updated `fallback-paths.nix` in Nixpkgs - -* An updated manual on https://nixos.org/manual/nix/stable/ - -## Creating a new release from the `master` branch - -* Make sure that the [Hydra `master` jobset](https://hydra.nixos.org/jobset/nix/master) succeeds. - -* In a checkout of the Nix repo, make sure you're on `master` and run - `git pull`. - -* Compile the release notes by running - - ```console - $ git checkout -b release-notes - $ VERSION=X.YY ./maintainers/release-notes - ``` - - where `X.YY` is *without* the patch level, e.g. `2.12` rather than ~~`2.12.0`~~. - - A commit is created. - -* Proof-read / edit / rearrange the release notes if needed. Breaking changes - and highlights should go to the top. - -* Push. - - ```console - $ git push --set-upstream $REMOTE release-notes - ``` - -* Create a PR for `release-notes`. - -* Wait for the PR to be merged. - -* Create a branch for the release: - - ```console - $ git checkout master - $ git pull - $ git checkout -b $VERSION-maintenance - ``` - -* Mark the release as official: - - ```console - $ sed -e 's/officialRelease = false;/officialRelease = true;/' -i flake.nix - $ sed -e '/rl-next.md/ d' -i doc/manual/src/SUMMARY.md - ``` - - This removes the link to `rl-next.md` from the manual and sets - `officialRelease = true` in `flake.nix`. - -* Commit - -* Push the release branch: - - ```console - $ git push --set-upstream origin $VERSION-maintenance - ``` - -* Create a jobset for the release branch on Hydra as follows: - - * Go to the jobset of the previous release - (e.g. https://hydra.nixos.org/jobset/nix/maintenance-2.11). - - * Select `Actions -> Clone this jobset`. - - * Set identifier to `maintenance-$VERSION`. - - * Set description to `$VERSION release branch`. - - * Set flake URL to `github:NixOS/nix/$VERSION-maintenance`. - - * Hit `Create jobset`. - -* Wait for the new jobset to evaluate and build. If impatient, go to - the evaluation and select `Actions -> Bump builds to front of - queue`. - -* When the jobset evaluation has succeeded building, take note of the - evaluation ID (e.g. `1780832` in - `https://hydra.nixos.org/eval/1780832`). - -* Tag the release and upload the release artifacts to - [`releases.nixos.org`](https://releases.nixos.org/) and [Docker Hub](https://hub.docker.com/): - - ```console - $ IS_LATEST=1 ./maintainers/upload-release.pl - ``` - - Note: `IS_LATEST=1` causes the `latest-release` branch to be - force-updated. 
This is used by the `nixos.org` website to get the - [latest Nix manual](https://nixos.org/manual/nixpkgs/unstable/). - - TODO: This script requires the right AWS credentials. Document. - - TODO: This script currently requires a - `/home/eelco/Dev/nix-pristine`. - - TODO: trigger nixos.org netlify: https://docs.netlify.com/configure-builds/build-hooks/ - -* Prepare for the next point release by editing `.version` to - e.g. - - ```console - $ echo 2.12.1 > .version - $ git commit -a -m 'Bump version' - $ git push - ``` - - Commit and push this to the maintenance branch. - -* Bump the version of `master`: - - ```console - $ git checkout master - $ git pull - $ NEW_VERSION=2.13.0 - $ echo $NEW_VERSION > .version - $ git checkout -b bump-$NEW_VERSION - $ git commit -a -m 'Bump version' - $ git push --set-upstream origin bump-$NEW_VERSION - ``` - - Make a pull request and auto-merge it. - -* Create a milestone for the next release, move all unresolved issues - from the previous milestone, and close the previous milestone. Set - the date for the next milestone 6 weeks from now. - -* Create a backport label. - -* Post an [announcement on Discourse](https://discourse.nixos.org/c/announcements/8), including the contents of - `rl-$VERSION.md`. - -## Creating a point release - -* Checkout. - - ```console - $ git checkout XX.YY-maintenance - ``` - -* Determine the next patch version. - - ```console - $ export VERSION=XX.YY.ZZ - ``` - -* Update release notes. - - ```console - $ ./maintainers/release-notes - ``` - -* Push. - - ```console - $ git push - ``` - -* Wait for the desired evaluation of the maintenance jobset to finish - building. - -* Run - - ```console - $ IS_LATEST=1 ./maintainers/upload-release.pl - ``` - - Omit `IS_LATEST=1` when creating a point release that is not on the - most recent stable branch. This prevents `nixos.org` to going back - to an older release. - -* Bump the version number of the release branch as above (e.g. to - `2.12.2`). - -## Recovering from mistakes - -`upload-release.pl` should be idempotent. For instance a wrong `IS_LATEST` value can be fixed that way, by running the script on the actual latest release. From 4eb6779ea8025610d6b2040362e5950c722e4342 Mon Sep 17 00:00:00 2001 From: Qyriad Date: Sun, 19 May 2024 12:32:13 -0600 Subject: [PATCH 12/24] fix -Wdeprecated-copy on clang (BaseError copy assignment) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 2bbe3efd1¹ added the -Wdeprecated-copy warning, and fixed the instances of it which GCC warned about, in HintFmt and ref. However, when building with Clang, there is an additional deprecated-copy warning in BaseError. This commit explicitly defaults the copy assignment operator for BaseError and silences this warning. 1: 2bbe3efd169534f538184ff788eecb398ead70a4 Change-Id: I50aa4a7ab1a7aae5d7b31f765994abd3db06379d --- src/libutil/error.hh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/libutil/error.hh b/src/libutil/error.hh index 924366580..323365d65 100644 --- a/src/libutil/error.hh +++ b/src/libutil/error.hh @@ -110,6 +110,8 @@ protected: public: BaseError(const BaseError &) = default; + BaseError & operator=(BaseError const & rhs) = default; + template BaseError(unsigned int status, const Args & ... 
args) : err { .level = lvlError, .msg = HintFmt(args...), .status = status } From a354779d786a850177407d7eb48b49827833ac58 Mon Sep 17 00:00:00 2001 From: Jade Lovelace Date: Sun, 19 May 2024 13:53:39 -0700 Subject: [PATCH 13/24] Remove upload-release.pl We are doing releases totally differently than Nix so this will need rewriting anyway. Change-Id: Iba4ad160b9d215fcbf20a14243fd87cfbb527760 --- maintainers/upload-release.pl | 256 ---------------------------------- 1 file changed, 256 deletions(-) delete mode 100755 maintainers/upload-release.pl diff --git a/maintainers/upload-release.pl b/maintainers/upload-release.pl deleted file mode 100755 index ebc536f12..000000000 --- a/maintainers/upload-release.pl +++ /dev/null @@ -1,256 +0,0 @@ -#! /usr/bin/env nix-shell -#! nix-shell -i perl -p perl perlPackages.LWPUserAgent perlPackages.LWPProtocolHttps perlPackages.FileSlurp perlPackages.NetAmazonS3 gnupg1 - -use strict; -use Data::Dumper; -use File::Basename; -use File::Path; -use File::Slurp; -use File::Copy; -use JSON::PP; -use LWP::UserAgent; -use Net::Amazon::S3; - -my $evalId = $ARGV[0] or die "Usage: $0 EVAL-ID\n"; - -my $releasesBucketName = "nix-releases"; -my $channelsBucketName = "nix-channels"; - -my $TMPDIR = $ENV{'TMPDIR'} // "/tmp"; - -my $isLatest = ($ENV{'IS_LATEST'} // "") eq "1"; - -# FIXME: cut&paste from nixos-channel-scripts. -sub fetch { - my ($url, $type) = @_; - - my $ua = LWP::UserAgent->new; - $ua->default_header('Accept', $type) if defined $type; - - my $response = $ua->get($url); - die "could not download $url: ", $response->status_line, "\n" unless $response->is_success; - - return $response->decoded_content; -} - -my $evalUrl = "https://hydra.nixos.org/eval/$evalId"; -my $evalInfo = decode_json(fetch($evalUrl, 'application/json')); -#print Dumper($evalInfo); -my $flakeUrl = $evalInfo->{flake} or die; -my $flakeInfo = decode_json(`nix flake metadata --json "$flakeUrl"` or die); -my $nixRev = $flakeInfo->{revision} or die; - -my $buildInfo = decode_json(fetch("$evalUrl/job/build.x86_64-linux", 'application/json')); -#print Dumper($buildInfo); - -my $releaseName = $buildInfo->{nixname}; -$releaseName =~ /nix-(.*)$/ or die; -my $version = $1; - -print STDERR "Flake URL is $flakeUrl, Nix revision is $nixRev, version is $version\n"; - -my $releaseDir = "nix/$releaseName"; - -my $tmpDir = "$TMPDIR/nix-release/$releaseName"; -File::Path::make_path($tmpDir); - -my $narCache = "$TMPDIR/nar-cache"; -File::Path::make_path($narCache); - -my $binaryCache = "https://cache.nixos.org/?local-nar-cache=$narCache"; - -# S3 setup. 
-my $aws_access_key_id = $ENV{'AWS_ACCESS_KEY_ID'} or die "No AWS_ACCESS_KEY_ID given."; -my $aws_secret_access_key = $ENV{'AWS_SECRET_ACCESS_KEY'} or die "No AWS_SECRET_ACCESS_KEY given."; - -my $s3 = Net::Amazon::S3->new( - { aws_access_key_id => $aws_access_key_id, - aws_secret_access_key => $aws_secret_access_key, - retry => 1, - host => "s3-eu-west-1.amazonaws.com", - }); - -my $releasesBucket = $s3->bucket($releasesBucketName) or die; - -my $s3_us = Net::Amazon::S3->new( - { aws_access_key_id => $aws_access_key_id, - aws_secret_access_key => $aws_secret_access_key, - retry => 1, - }); - -my $channelsBucket = $s3_us->bucket($channelsBucketName) or die; - -sub getStorePath { - my ($jobName, $output) = @_; - my $buildInfo = decode_json(fetch("$evalUrl/job/$jobName", 'application/json')); - return $buildInfo->{buildoutputs}->{$output or "out"}->{path} or die "cannot get store path for '$jobName'"; -} - -sub copyManual { - my $manual = getStorePath("build.x86_64-linux", "doc"); - print "$manual\n"; - - my $manualNar = "$tmpDir/$releaseName-manual.nar.xz"; - print "$manualNar\n"; - - unless (-e $manualNar) { - system("NIX_REMOTE=$binaryCache nix store dump-path '$manual' | xz > '$manualNar'.tmp") == 0 - or die "unable to fetch $manual\n"; - rename("$manualNar.tmp", $manualNar) or die; - } - - unless (-e "$tmpDir/manual") { - system("xz -d < '$manualNar' | nix-store --restore $tmpDir/manual.tmp") == 0 - or die "unable to unpack $manualNar\n"; - rename("$tmpDir/manual.tmp/share/doc/nix/manual", "$tmpDir/manual") or die; - system("rm -rf '$tmpDir/manual.tmp'") == 0 or die; - } - - system("aws s3 sync '$tmpDir/manual' s3://$releasesBucketName/$releaseDir/manual") == 0 - or die "syncing manual to S3\n"; -} - -copyManual; - -sub downloadFile { - my ($jobName, $productNr, $dstName) = @_; - - my $buildInfo = decode_json(fetch("$evalUrl/job/$jobName", 'application/json')); - #print STDERR "$jobName: ", Dumper($buildInfo), "\n"; - - my $srcFile = $buildInfo->{buildproducts}->{$productNr}->{path} or die "job '$jobName' lacks product $productNr\n"; - $dstName //= basename($srcFile); - my $tmpFile = "$tmpDir/$dstName"; - - if (!-e $tmpFile) { - print STDERR "downloading $srcFile to $tmpFile...\n"; - - my $fileInfo = decode_json(`NIX_REMOTE=$binaryCache nix store ls --json '$srcFile'`); - - $srcFile = $fileInfo->{target} if $fileInfo->{type} eq 'symlink'; - - #print STDERR $srcFile, " ", Dumper($fileInfo), "\n"; - - system("NIX_REMOTE=$binaryCache nix store cat '$srcFile' > '$tmpFile'.tmp") == 0 - or die "unable to fetch $srcFile\n"; - rename("$tmpFile.tmp", $tmpFile) or die; - } - - my $sha256_expected = $buildInfo->{buildproducts}->{$productNr}->{sha256hash}; - my $sha256_actual = `nix hash file --base16 --type sha256 '$tmpFile'`; - chomp $sha256_actual; - if (defined($sha256_expected) && $sha256_expected ne $sha256_actual) { - print STDERR "file $tmpFile is corrupt, got $sha256_actual, expected $sha256_expected\n"; - exit 1; - } - - write_file("$tmpFile.sha256", $sha256_actual); - - return $sha256_expected; -} - -downloadFile("binaryTarball.i686-linux", "1"); -downloadFile("binaryTarball.x86_64-linux", "1"); -downloadFile("binaryTarball.aarch64-linux", "1"); -downloadFile("binaryTarball.x86_64-darwin", "1"); -downloadFile("binaryTarball.aarch64-darwin", "1"); -downloadFile("binaryTarballCross.x86_64-linux.armv6l-linux", "1"); -downloadFile("binaryTarballCross.x86_64-linux.armv7l-linux", "1"); -downloadFile("installerScript", "1"); - -# Upload docker images to dockerhub. 
-my $dockerManifest = ""; -my $dockerManifestLatest = ""; - -for my $platforms (["x86_64-linux", "amd64"], ["aarch64-linux", "arm64"]) { - my $system = $platforms->[0]; - my $dockerPlatform = $platforms->[1]; - my $fn = "nix-$version-docker-image-$dockerPlatform.tar.gz"; - downloadFile("dockerImage.$system", "1", $fn); - - print STDERR "loading docker image for $dockerPlatform...\n"; - system("docker load -i $tmpDir/$fn") == 0 or die; - - my $tag = "nixos/nix:$version-$dockerPlatform"; - my $latestTag = "nixos/nix:latest-$dockerPlatform"; - - print STDERR "tagging $version docker image for $dockerPlatform...\n"; - system("docker tag nix:$version $tag") == 0 or die; - - if ($isLatest) { - print STDERR "tagging latest docker image for $dockerPlatform...\n"; - system("docker tag nix:$version $latestTag") == 0 or die; - } - - print STDERR "pushing $version docker image for $dockerPlatform...\n"; - system("docker push -q $tag") == 0 or die; - - if ($isLatest) { - print STDERR "pushing latest docker image for $dockerPlatform...\n"; - system("docker push -q $latestTag") == 0 or die; - } - - $dockerManifest .= " --amend $tag"; - $dockerManifestLatest .= " --amend $latestTag" -} - -print STDERR "creating multi-platform docker manifest...\n"; -system("docker manifest rm nixos/nix:$version"); -system("docker manifest create nixos/nix:$version $dockerManifest") == 0 or die; -if ($isLatest) { - print STDERR "creating latest multi-platform docker manifest...\n"; - system("docker manifest rm nixos/nix:latest"); - system("docker manifest create nixos/nix:latest $dockerManifestLatest") == 0 or die; -} - -print STDERR "pushing multi-platform docker manifest...\n"; -system("docker manifest push nixos/nix:$version") == 0 or die; - -if ($isLatest) { - print STDERR "pushing latest multi-platform docker manifest...\n"; - system("docker manifest push nixos/nix:latest") == 0 or die; -} - -# Upload nix-fallback-paths.nix. -write_file("$tmpDir/fallback-paths.nix", - "{\n" . - " x86_64-linux = \"" . getStorePath("build.x86_64-linux") . "\";\n" . - " i686-linux = \"" . getStorePath("build.i686-linux") . "\";\n" . - " aarch64-linux = \"" . getStorePath("build.aarch64-linux") . "\";\n" . - " x86_64-darwin = \"" . getStorePath("build.x86_64-darwin") . "\";\n" . - " aarch64-darwin = \"" . getStorePath("build.aarch64-darwin") . "\";\n" . - "}\n"); - -# Upload release files to S3. -for my $fn (glob "$tmpDir/*") { - my $name = basename($fn); - next if $name eq "manual"; - my $dstKey = "$releaseDir/" . $name; - unless (defined $releasesBucket->head_key($dstKey)) { - print STDERR "uploading $fn to s3://$releasesBucketName/$dstKey...\n"; - - my $configuration = (); - $configuration->{content_type} = "application/octet-stream"; - - if ($fn =~ /.sha256|install|\.nix$/) { - $configuration->{content_type} = "text/plain"; - } - - $releasesBucket->add_key_filename($dstKey, $fn, $configuration) - or die $releasesBucket->err . ": " . $releasesBucket->errstr; - } -} - -# Update the "latest" symlink. -$channelsBucket->add_key( - "nix-latest/install", "", - { "x-amz-website-redirect-location" => "https://releases.nixos.org/$releaseDir/install" }) - or die $channelsBucket->err . ": " . $channelsBucket->errstr - if $isLatest; - -# Tag the release in Git. 
-chdir("/home/eelco/Dev/nix-pristine") or die; -system("git remote update origin") == 0 or die; -system("git tag --force --sign $version $nixRev -m 'Tagging release $version'") == 0 or die; -system("git push --tags") == 0 or die; -system("git push --force-with-lease origin $nixRev:refs/heads/latest-release") == 0 or die if $isLatest; From 5411fbf20467c5dd561048311519056688b1154d Mon Sep 17 00:00:00 2001 From: Artemis Tosini Date: Sun, 19 May 2024 22:07:58 +0000 Subject: [PATCH 14/24] libutil: Create chmodPath function Move the identical static `chmod_` functions in libstore to libutil. the function is called `chmodPath` instead of `chmod` as otherwise it will shadow the standard library chmod in the nix namespace, which is somewhat confusing. Change-Id: I7b5ce379c6c602e3d3a1bbc49dbb70b1ae8f7bad --- src/libstore/build/derivation-goal.cc | 11 ++--------- src/libstore/build/local-derivation-goal.cc | 20 +++++++------------- src/libutil/util.cc | 8 ++++++-- src/libutil/util.hh | 7 +++++++ 4 files changed, 22 insertions(+), 24 deletions(-) diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 3f24da276..5fa5deb7c 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -786,13 +786,6 @@ void DerivationGoal::tryLocalBuild() { } -static void chmod_(const Path & path, mode_t mode) -{ - if (chmod(path.c_str(), mode) == -1) - throw SysError("setting permissions on '%s'", path); -} - - /* Move/rename path 'src' to 'dst'. Temporarily make 'src' writable if it's a directory and we're not root (to be able to update the directory's parent link ".."). */ @@ -803,12 +796,12 @@ static void movePath(const Path & src, const Path & dst) bool changePerm = (geteuid() && S_ISDIR(st.st_mode) && !(st.st_mode & S_IWUSR)); if (changePerm) - chmod_(src, st.st_mode | S_IWUSR); + chmodPath(src, st.st_mode | S_IWUSR); renameFile(src, dst); if (changePerm) - chmod_(dst, st.st_mode); + chmodPath(dst, st.st_mode); } diff --git a/src/libstore/build/local-derivation-goal.cc b/src/libstore/build/local-derivation-goal.cc index cdbd0f5a7..5c36a3ac2 100644 --- a/src/libstore/build/local-derivation-goal.cc +++ b/src/libstore/build/local-derivation-goal.cc @@ -272,12 +272,6 @@ void LocalDerivationGoal::tryLocalBuild() started(); } -static void chmod_(const Path & path, mode_t mode) -{ - if (chmod(path.c_str(), mode) == -1) - throw SysError("setting permissions on '%s'", path); -} - /* Move/rename path 'src' to 'dst'. Temporarily make 'src' writable if it's a directory and we're not root (to be able to update the @@ -289,12 +283,12 @@ static void movePath(const Path & src, const Path & dst) bool changePerm = (geteuid() && S_ISDIR(st.st_mode) && !(st.st_mode & S_IWUSR)); if (changePerm) - chmod_(src, st.st_mode | S_IWUSR); + chmodPath(src, st.st_mode | S_IWUSR); renameFile(src, dst); if (changePerm) - chmod_(dst, st.st_mode); + chmodPath(dst, st.st_mode); } @@ -696,7 +690,7 @@ void LocalDerivationGoal::startBuilder() instead.) */ Path chrootTmpDir = chrootRootDir + "/tmp"; createDirs(chrootTmpDir); - chmod_(chrootTmpDir, 01777); + chmodPath(chrootTmpDir, 01777); /* Create a /etc/passwd with entries for the build user and the nobody account. The latter is kind of a hack to support @@ -721,7 +715,7 @@ void LocalDerivationGoal::startBuilder() build user. 
*/ Path chrootStoreDir = chrootRootDir + worker.store.storeDir; createDirs(chrootStoreDir); - chmod_(chrootStoreDir, 01775); + chmodPath(chrootStoreDir, 01775); if (buildUser && chown(chrootStoreDir.c_str(), 0, buildUser->getGID()) == -1) throw SysError("cannot change ownership of '%1%'", chrootStoreDir); @@ -1862,7 +1856,7 @@ void LocalDerivationGoal::runChild() auto dst = chrootRootDir + i.first; createDirs(dirOf(dst)); writeFile(dst, std::string_view((const char *) sh, sizeof(sh))); - chmod_(dst, 0555); + chmodPath(dst, 0555); } else #endif doBind(i.second.source, chrootRootDir + i.first, i.second.optional); @@ -1900,7 +1894,7 @@ void LocalDerivationGoal::runChild() /* Make sure /dev/pts/ptmx is world-writable. With some Linux versions, it is created with permissions 0. */ - chmod_(chrootRootDir + "/dev/pts/ptmx", 0666); + chmodPath(chrootRootDir + "/dev/pts/ptmx", 0666); } else { if (errno != EINVAL) throw SysError("mounting /dev/pts"); @@ -1911,7 +1905,7 @@ void LocalDerivationGoal::runChild() /* Make /etc unwritable */ if (!parsedDrv->useUidRange()) - chmod_(chrootRootDir + "/etc", 0555); + chmodPath(chrootRootDir + "/etc", 0555); /* Unshare this mount namespace. This is necessary because pivot_root() below changes the root of the mount diff --git a/src/libutil/util.cc b/src/libutil/util.cc index bc2dd1802..2c0fcc897 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -184,6 +184,11 @@ Path canonPath(PathView path, bool resolveSymlinks) return s.empty() ? "/" : std::move(s); } +void chmodPath(const Path & path, mode_t mode) +{ + if (chmod(path.c_str(), mode) == -1) + throw SysError("setting permissions on '%s'", path); +} Path dirOf(const PathView path) { @@ -1799,8 +1804,7 @@ AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode) bind(fdSocket.get(), path); - if (chmod(path.c_str(), mode) == -1) - throw SysError("changing permissions on '%1%'", path); + chmodPath(path.c_str(), mode); if (listen(fdSocket.get(), 100) == -1) throw SysError("cannot listen on socket '%1%'", path); diff --git a/src/libutil/util.hh b/src/libutil/util.hh index 914d6cce0..14868776c 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -77,6 +77,13 @@ Path absPath(Path path, */ Path canonPath(PathView path, bool resolveSymlinks = false); +/** + * Change the permissions of a path + * Not called `chmod` as it shadows and could be confused with + * `int chmod(char *, mode_t)`, which does not handle errors + */ +void chmodPath(const Path & path, mode_t mode); + /** * @return The directory part of the given canonical path, i.e., * everything before the final `/`. 
If the path is the root or an From 40311973a835aceb866b7d4465c286c700a59ad4 Mon Sep 17 00:00:00 2001 From: Puck Meerburg Date: Sun, 19 May 2024 22:47:11 +0000 Subject: [PATCH 15/24] change-authors: add puck Change-Id: I04b8cd04a168b3adea7790f816e774d5d90fcea2 --- doc/manual/change-authors.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/manual/change-authors.yml b/doc/manual/change-authors.yml index 8c07d7e90..46164f5b1 100644 --- a/doc/manual/change-authors.yml +++ b/doc/manual/change-authors.yml @@ -66,3 +66,8 @@ midnightveil: display_name: julia forgejo: midnightveil github: midnightveil + +puck: + display_name: puck + forgejo: puck + github: puckipedia From bfb91db4f6a580b4f90a511a79185db4aa3f34d3 Mon Sep 17 00:00:00 2001 From: Puck Meerburg Date: Sun, 19 May 2024 14:16:11 +0000 Subject: [PATCH 16/24] repl-interacter: save history after entering every line Fixes: https://git.lix.systems/lix-project/lix/issues/328 Change-Id: Iedd79ff5f72e84766ebd234c63856170afc624f0 --- doc/manual/rl-next/repl-fix-history.md | 9 +++++++++ src/libcmd/repl-interacter.cc | 2 ++ 2 files changed, 11 insertions(+) create mode 100644 doc/manual/rl-next/repl-fix-history.md diff --git a/doc/manual/rl-next/repl-fix-history.md b/doc/manual/rl-next/repl-fix-history.md new file mode 100644 index 000000000..1517f68e7 --- /dev/null +++ b/doc/manual/rl-next/repl-fix-history.md @@ -0,0 +1,9 @@ +--- +synopsis: "`nix repl` history is saved more reliably" +cls: 1164 +credits: puck +--- + +`nix repl` now saves its history file after each line, rather than at the end +of the session; ensuring that it will remember what you typed even after it +crashes. diff --git a/src/libcmd/repl-interacter.cc b/src/libcmd/repl-interacter.cc index 829383add..d3567e021 100644 --- a/src/libcmd/repl-interacter.cc +++ b/src/libcmd/repl-interacter.cc @@ -175,6 +175,8 @@ bool ReadlineLikeInteracter::getLine(std::string & input, ReplPromptType promptT if (!s) return false; + + write_history(historyFile.c_str()); input += s; input += '\n'; return true; From 20981461d4a2a62c68f4bc7c4258473f7cd7d8e1 Mon Sep 17 00:00:00 2001 From: Qyriad Date: Tue, 21 May 2024 05:29:52 -0600 Subject: [PATCH 17/24] print type and value in "flake attr is not a derivation" errors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This turns errors like: error: flake output attribute 'hydraJobs' is not a derivation or path into errors like: error: expected flake output attribute 'hydraJobs' to be a derivation or path but found a set: { binaryTarball = «thunk»; build = «thunk»; etc> } This change affects all InstallableFlake commands. 
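For illustration only — a minimal, self-contained sketch of the idea, not code from this patch (`Attrs` and `preview()` below are invented stand-ins for Lix's attribute sets and its depth-limited value printer):

```cpp
// Standalone illustration (hypothetical, not Lix source): build an error
// message that names the offending value's type and shows a truncated
// rendering of the value, instead of only saying "is not a derivation".
#include <cstddef>
#include <iostream>
#include <map>
#include <sstream>
#include <string>

// Stand-in for a (lazily evaluated) Nix attribute set.
using Attrs = std::map<std::string, std::string>;

// Render at most `limit` attributes and elide the rest, in the spirit of
// the depth-limited printer used by the real patch.
std::string preview(const Attrs & attrs, std::size_t limit = 3)
{
    std::ostringstream out;
    out << "{ ";
    std::size_t shown = 0;
    for (const auto & [name, value] : attrs) {
        if (shown == limit) {
            out << "«" << attrs.size() - limit << " attributes elided» ";
            break;
        }
        out << name << " = " << value << "; ";
        ++shown;
    }
    out << "}";
    return out.str();
}

int main()
{
    Attrs hydraJobs{
        {"binaryTarball", "«thunk»"},
        {"build", "«thunk»"},
        {"manual", "«thunk»"},
        {"tests", "«thunk»"},
    };
    // Old style: error: flake output attribute 'hydraJobs' is not a derivation or path
    // New style (sketched):
    std::cerr << "error: expected flake output attribute 'hydraJobs' to be a "
                 "derivation or path but found a set: "
              << preview(hydraJobs) << "\n";
    return 0;
}
```

The actual change, shown in the diff below, reuses the existing `showType()` helper and the `ValuePrinter` with `errorPrintOptions` rather than anything hand-rolled like the sketch above.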
Change-Id: I899757af418b6f98201006ec6ee13a448c07077c --- .../print-value-in-installable-flake-error.md | 20 +++++++++++++++++++ src/libcmd/installable-flake.cc | 9 +++++++-- 2 files changed, 27 insertions(+), 2 deletions(-) create mode 100644 doc/manual/rl-next/print-value-in-installable-flake-error.md diff --git a/doc/manual/rl-next/print-value-in-installable-flake-error.md b/doc/manual/rl-next/print-value-in-installable-flake-error.md new file mode 100644 index 000000000..ae23b4dda --- /dev/null +++ b/doc/manual/rl-next/print-value-in-installable-flake-error.md @@ -0,0 +1,20 @@ +--- +synopsis: New-cli flake commands that expect derivations now print the failing value and its type +credits: Qyriad +category: Improvements +cls: 1177 +--- + +In errors like `flake output attribute 'legacyPackages.x86_64-linux.lib' is not a derivation or path`, the message now includes the failing value and type. + +Before: + +``` + error: flake output attribute 'nixosConfigurations.yuki.config' is not a derivation or path +```` + +After: + +``` + error: expected flake output attribute 'nixosConfigurations.yuki.config' to be a derivation or path but found a set: { appstream = «thunk»; assertions = «thunk»; boot = { bcache = «thunk»; binfmt = «thunk»; binfmtMiscRegistrations = «thunk»; blacklistedKernelModules = «thunk»; bootMount = «thunk»; bootspec = «thunk»; cleanTmpDir = «thunk»; consoleLogLevel = «thunk»; «43 attributes elided» }; «48 attributes elided» } +``` diff --git a/src/libcmd/installable-flake.cc b/src/libcmd/installable-flake.cc index 615f70945..46bdd411b 100644 --- a/src/libcmd/installable-flake.cc +++ b/src/libcmd/installable-flake.cc @@ -105,9 +105,14 @@ DerivedPathsWithInfo InstallableFlake::toDerivedPaths() fmt("while evaluating the flake output attribute '%s'", attrPath))) { return { *derivedPathWithInfo }; + } else { + throw Error( + "expected flake output attribute '%s' to be a derivation or path but found %s: %s", + attrPath, + showType(v), + ValuePrinter(*this->state, v, errorPrintOptions) + ); } - else - throw Error("flake output attribute '%s' is not a derivation or path", attrPath); } auto drvPath = attr->forceDerivation(); From 6219596ab1adc97cb0b1e618385aeb91d51db8fc Mon Sep 17 00:00:00 2001 From: Qyriad Date: Sun, 19 May 2024 12:57:14 -0600 Subject: [PATCH 18/24] repl: log errors writing to history file These errors are now logged and explicitly ignored, rather than implicitly ignored. The simple logic is duplicated in ReadlineLikeInteractor's getLine() and destructor, rather than refactoring history writing into a separate function. This is a tiny amount of logic and these two locations are right next to each other. Change-Id: Ia26015466a17f2b11952df5317a4d150d79dc184 --- src/libcmd/repl-interacter.cc | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/src/libcmd/repl-interacter.cc b/src/libcmd/repl-interacter.cc index d3567e021..843ebbbd2 100644 --- a/src/libcmd/repl-interacter.cc +++ b/src/libcmd/repl-interacter.cc @@ -1,6 +1,8 @@ #include #include #include +#include +#include #ifdef READLINE #include @@ -176,7 +178,14 @@ bool ReadlineLikeInteracter::getLine(std::string & input, ReplPromptType promptT if (!s) return false; - write_history(historyFile.c_str()); + if (write_history(historyFile.c_str()) != 0) { + // write_history returns the result of fclose() (which also flushes). + // We should explicitly ignore these errors, but log them so the user + // isn't confused why their history is getting eaten. 
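+        // At this point errno reflects the failed fclose() inside write_history();
+        // it is captured immediately, before any later call can overwrite it.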
+ error_t const fcloseErr = errno; + std::string_view const errMsg(strerror(fcloseErr)); + warn("ignoring error writing repl history to %s: %s", historyFile, errMsg); + } input += s; input += '\n'; return true; @@ -184,7 +193,13 @@ bool ReadlineLikeInteracter::getLine(std::string & input, ReplPromptType promptT ReadlineLikeInteracter::~ReadlineLikeInteracter() { - write_history(historyFile.c_str()); + if (write_history(historyFile.c_str()) != 0) { + // write_history returns the result of fclose() (which also flushes). + // We should explicitly ignore these errors, but log them. + error_t const fcloseErr = errno; + std::string_view const errMsg(strerror(fcloseErr)); + warn("ignoring error writing repl history to %s: %s", historyFile, errMsg); + } } AutomationInteracter::Guard AutomationInteracter::init(detail::ReplCompleterMixin *) From 25f390963c94bc062c6a8416c2e586771dc76c26 Mon Sep 17 00:00:00 2001 From: Qyriad Date: Mon, 20 May 2024 13:16:09 -0600 Subject: [PATCH 19/24] add docstring to lookupFileArg Change-Id: Ifc149764f5a15725d3d630677c6da29def4b0f3e --- src/libcmd/common-eval-args.hh | 22 ++++++++++++- tests/unit/libcmd/args.cc | 57 ++++++++++++++++++++++++++++++++++ tests/unit/meson.build | 27 ++++++++++++++++ 3 files changed, 105 insertions(+), 1 deletion(-) create mode 100644 tests/unit/libcmd/args.cc diff --git a/src/libcmd/common-eval-args.hh b/src/libcmd/common-eval-args.hh index 6359b2579..08a4b65e4 100644 --- a/src/libcmd/common-eval-args.hh +++ b/src/libcmd/common-eval-args.hh @@ -28,6 +28,26 @@ private: std::map autoArgs; }; -SourcePath lookupFileArg(EvalState & state, std::string_view s); +/** @brief Resolve an argument that is generally a file, but could be something that + * is easy to resolve to a file, like a or a tarball URL. + * + * In particular, this will resolve and fetch pseudo-URLs starting with + * @c channel:, flakerefs starting with @c flake:, and anything that + * @ref nix::fetchers::downloadTarball() can take. + * + * Non-absolute files are looked up relative to the current directory(?) + * FIXME: the process's current directory or EvalState's current directory? + * + * @param state The nix::EvalState to base settings, store, and nixPath from. + * + * @param fileArg The the path-ish to resolve. + * + * @return A nix::SourcePath to the resolved and fetched file. + * + * @exception nix::FileTransferError from nix::fetchers::downloadTarball(). Probably others. + * + * @exception nix::ThrownError for failed search path lookup. Probably others. + */ +SourcePath lookupFileArg(EvalState & state, std::string_view fileArg); } diff --git a/tests/unit/libcmd/args.cc b/tests/unit/libcmd/args.cc new file mode 100644 index 000000000..da7ad598e --- /dev/null +++ b/tests/unit/libcmd/args.cc @@ -0,0 +1,57 @@ +#include +#include +#include + +#include +#include + +#include "common-eval-args.hh" +#include "eval.hh" +#include "filetransfer.hh" +#include "shared.hh" +#include "store-api.hh" +#include "util.hh" + +constexpr std::string_view INVALID_CHANNEL = "channel:example"; +constexpr std::string_view CHANNEL_URL = "https://nixos.org/channels/example/nixexprs.tar.xz"; + +namespace nix +{ + +TEST(Arguments, lookupFileArg) { + initNix(); + initGC(); + + std::string const unitDataPath = getEnv("_NIX_TEST_UNIT_DATA").value(); + // Meson should be allowed to pass us a relative path here tbh. 
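+    // CanonPath::fromCwd() turns the possibly-relative data path into an absolute,
+    // canonical one, so it can be compared against what lookupFileArg() returns below.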
+ auto const canonDataPath = CanonPath::fromCwd(unitDataPath); + + std::string const searchPathElem = fmt("example=%s", unitDataPath); + + SearchPath searchPath; + searchPath.elements.push_back(SearchPath::Elem::parse(searchPathElem)); + + auto store = openStore("dummy://"); + auto statePtr = std::make_shared(searchPath, store, store); + auto & state = *statePtr; + + SourcePath const foundUnitData = lookupFileArg(state, ""); + EXPECT_EQ(foundUnitData.path, canonDataPath); + + // lookupFileArg should not resolve if anything else is before or after it. + SourcePath const yepEvenSpaces = lookupFileArg(state, " "); + EXPECT_EQ(yepEvenSpaces.path, CanonPath::fromCwd(" ")); + EXPECT_EQ(lookupFileArg(state, "/nixos").path, CanonPath::fromCwd("/nixos")); + + try { + lookupFileArg(state, INVALID_CHANNEL); + } catch (FileTransferError const & ex) { + std::string_view msg(ex.what()); + EXPECT_NE(msg.find(CHANNEL_URL), msg.npos); + } + + SourcePath const normalFile = lookupFileArg(state, unitDataPath); + EXPECT_EQ(normalFile.path, canonDataPath); +} + +} diff --git a/tests/unit/meson.build b/tests/unit/meson.build index f5355cce8..c53677d48 100644 --- a/tests/unit/meson.build +++ b/tests/unit/meson.build @@ -212,3 +212,30 @@ test( protocol : 'gtest', verbose : true, ) + +libcmd_tester = executable( + 'liblixcmd-tests', + files('libcmd/args.cc'), + dependencies : [ + liblixcmd, + liblixutil, + liblixmain, + liblixexpr, + liblixstore, + gtest, + boost, + ], +) + +test( + 'libcmd-eval-args', + libcmd_tester, + args : [tests_args, 'TestEvalArgs'], + env : { + # No special meaning here, it's just a file laying around that is unlikely to go anywhere + # any time soon. + '_NIX_TEST_UNIT_DATA': meson.project_source_root() / 'src/nix-env/buildenv.nix', + }, + suite : 'check', + protocol : 'gtest', +) From 1f071faab17abb91dba3e2cbfd332f03bcf6f678 Mon Sep 17 00:00:00 2001 From: Qyriad Date: Mon, 20 May 2024 13:51:42 -0600 Subject: [PATCH 20/24] cleanup lookupFileArg Change-Id: I2acd56e7a542b12138f43c95af78fdd50e944619 --- src/libcmd/common-eval-args.cc | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/src/libcmd/common-eval-args.cc b/src/libcmd/common-eval-args.cc index 94a4b7922..9beea5aa2 100644 --- a/src/libcmd/common-eval-args.cc +++ b/src/libcmd/common-eval-args.cc @@ -164,28 +164,30 @@ Bindings * MixEvalArgs::getAutoArgs(EvalState & state) return res.finish(); } -SourcePath lookupFileArg(EvalState & state, std::string_view s) +SourcePath lookupFileArg(EvalState & state, std::string_view fileArg) { - if (EvalSettings::isPseudoUrl(s)) { - auto storePath = fetchers::downloadTarball( - state.store, EvalSettings::resolvePseudoUrl(s), "source", false).tree.storePath; + if (EvalSettings::isPseudoUrl(fileArg)) { + auto const url = EvalSettings::resolvePseudoUrl(fileArg); + auto const downloaded = fetchers::downloadTarball( + state.store, + url, + /* name */ "source", + /* locked */ false + ); + StorePath const storePath = downloaded.tree.storePath; return state.rootPath(CanonPath(state.store->toRealPath(storePath))); - } - - else if (s.starts_with("flake:")) { + } else if (fileArg.starts_with("flake:")) { experimentalFeatureSettings.require(Xp::Flakes); - auto flakeRef = parseFlakeRef(std::string(s.substr(6)), {}, true, false); + static constexpr size_t FLAKE_LEN = std::string_view("flake:").size(); + auto flakeRef = parseFlakeRef(std::string(fileArg.substr(FLAKE_LEN)), {}, true, false); auto storePath = 
flakeRef.resolve(state.store).fetchTree(state.store).first.storePath; return state.rootPath(CanonPath(state.store->toRealPath(storePath))); - } - - else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') { - Path p(s.substr(1, s.size() - 2)); + } else if (fileArg.size() > 2 && fileArg.at(0) == '<' && fileArg.at(fileArg.size() - 1) == '>') { + Path p(fileArg.substr(1, fileArg.size() - 2)); return state.findFile(p); + } else { + return state.rootPath(CanonPath::fromCwd(fileArg)); } - - else - return state.rootPath(CanonPath::fromCwd(s)); } } From f4ce7194a904e34d5008279bd1502a2a0cc328ca Mon Sep 17 00:00:00 2001 From: Qyriad Date: Mon, 20 May 2024 18:25:12 -0600 Subject: [PATCH 21/24] WIP: add TestingStore store implementation Change-Id: I1e57895030665fb63d1c47cb4fc2b959b5761618 --- src/libstore/meson.build | 1 + src/libstore/testing-store.cc | 217 ++++++++++++++++++++++++++++++++++ 2 files changed, 218 insertions(+) create mode 100644 src/libstore/testing-store.cc diff --git a/src/libstore/meson.build b/src/libstore/meson.build index 5fde92dd0..125a19bc9 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -40,6 +40,7 @@ libstore_sources = files( 'derived-path.cc', 'downstream-placeholder.cc', 'dummy-store.cc', + 'testing-store.cc', 'export-import.cc', 'filetransfer.cc', 'gc.cc', diff --git a/src/libstore/testing-store.cc b/src/libstore/testing-store.cc new file mode 100644 index 000000000..64f1e3e8d --- /dev/null +++ b/src/libstore/testing-store.cc @@ -0,0 +1,217 @@ +#include +#include +#include +#include + +#include + +#include "content-address.hh" +#include "hash.hh" +#include "path-info.hh" +#include "store-api.hh" + +namespace nix +{ + +struct TestingStoreConfig : virtual StoreConfig +{ + using StoreConfig::StoreConfig; + + std::string const name() override + { + return "Testing Store"; + } + + std::string doc() override + { + return ""; + } +}; + +/** Like DummyStore, but keeps some track of what it's done and stubs rather than fails unsupported operations. + * + * Don't expect any operations on this to be at all fast lmao. 
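+ * (Lookups are implemented as simple linear scans over the vectors recorded below.)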
+ */ +struct TestingStore : public virtual TestingStoreConfig, public virtual Store +{ + struct AddedPath + { + StorePath storePath; + ValidPathInfo pathInfo; + }; + + struct AddedText + { + std::string name; + std::string text; + StorePath storePath; + }; + + static std::set uriSchemes() + { + return {"testing"}; + } + + std::vector addedPaths; + std::vector addedText; + + TestingStore(std::string const scheme, std::string const uri, Params const & params) + : TestingStore(params) + { } + + TestingStore(Params const & params) + : StoreConfig(params) + , TestingStoreConfig(params) + , Store(params) + { } + + virtual std::optional isTrustedClient() override + { + return Trusted; + } + + std::string getUri() override + { + return *this->uriSchemes().begin(); + } + + void addToStore( + ValidPathInfo const & info, + Source & source, + RepairFlag repair, + CheckSigsFlag checkSigs + ) override + { + notice("testing store: adding path '%s'", this->printStorePath(info.path)); + AddedPath added{ + .storePath = info.path, + .pathInfo = info, + }; + + this->addedPaths.push_back(added); + } + + virtual StorePath addToStoreFromDump( + Source & dump, + std::string_view name, + FileIngestionMethod method = FileIngestionMethod::Recursive, + HashType hashAlgo = htSHA256, + RepairFlag repair = NoRepair, + StorePathSet const & references = {} + ) override + { + notice("testing store: adding path from Source %s", boost::core::demangle(typeid(dump).name())); + HashSink hashSink(hashAlgo); + TeeSource tee(dump, hashSink); + + static constexpr uint64_t BUFSIZE = 32ULL * 1024ULL * 1024ULL; + + std::vector all; + char buffer[4096] = {0}; + size_t got = tee.read(buffer, BUFSIZE); + while (got > 0) { + for (size_t i = 0; i < got; i++) { + all.push_back(buffer[i]); + } + got = tee.read(buffer, 4096); + } + + auto const [hash, size] = hashSink.finish(); + + ContentAddressWithReferences caDesc(FixedOutputInfo{ + .method = method, + .hash = hash, + .references = { + .others = references, + .self = false, + }, + }); + + StorePath const dstPath = makeFixedOutputPathFromCA(name, caDesc); + + HashSink narSink(htSHA256); + + ValidPathInfo pathInfo{ + *this, + name, + std::move(caDesc), + Hash(htSHA256), + }; + + AddedPath added{ + .storePath = dstPath, + .pathInfo = pathInfo, + }; + + this->addedPaths.push_back(added); + + return dstPath; + } + + StorePath addTextToStore( + std::string_view name, + std::string_view text, + StorePathSet const & references, + [[maybe_unused]] RepairFlag repair + ) override + { + auto const hash = hashString(htSHA256, text); + StorePath const dstPath = makeTextPath(name, TextInfo { + .hash = hash, + .references = references, + }); + + AddedText added{ + .name = std::string(name), + .text = std::string(text), + .storePath = dstPath, + }; + + this->addedText.push_back(added); + + return dstPath; + } + + void narFromPath(StorePath const & path, Sink & sink) override + { + this->unsupported("narFromPath"); + } + + std::optional queryPathFromHashPart(std::string const & hashPart) override + { + for (auto const & addedPath : this->addedPaths) { + if (addedPath.storePath.hashPart() == hashPart) { + return std::make_optional(addedPath.storePath); + } + } + + return std::nullopt; + } + + std::shared_ptr queryPathInfoUncached(StorePath const & path) override + { + for (auto const & addedPath : this->addedPaths) { + if (addedPath.storePath == path) { + return std::shared_ptr(&addedPath.pathInfo); + } + } + + return nullptr; + } + + std::shared_ptr queryRealisationUncached(DrvOutput const &) override + { + 
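+        // Realisations are not recorded by this store (yet), so the query is
+        // reported as unsupported.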
+        this->unsupported("queryRealisationUncached");
+        // return nullptr;
+    }
+
+    virtual ref<FSAccessor> getFSAccessor() override
+    {
+        this->unsupported("getFSAccessor");
+    }
+
+};
+
+static RegisterStoreImplementation<TestingStore, TestingStoreConfig> regTestingStore;
+
+}

From 46cb407f21912db0663579fe94e3c0f8fd11dd73 Mon Sep 17 00:00:00 2001
From: Qyriad
Date: Mon, 20 May 2024 20:54:00 -0600
Subject: [PATCH 22/24] build: make internal-api-docs PHONEY

Change-Id: I3b0bcea30ee9a4830023ccc5bededf995e96cccc
---
 doc/internal-api/meson.build | 1 +
 1 file changed, 1 insertion(+)

diff --git a/doc/internal-api/meson.build b/doc/internal-api/meson.build
index 35d8a0e5b..faa30f194 100644
--- a/doc/internal-api/meson.build
+++ b/doc/internal-api/meson.build
@@ -28,6 +28,7 @@ internal_api_docs = custom_target(
   output : 'html',
   install : true,
   install_dir : datadir / 'doc/nix/internal-api',
+  build_always_stale : true,
 )
 
 alias_target('internal-api-html', internal_api_docs)

From 380cf45cb6e4f73f583b2691f6b9392d9d0832d5 Mon Sep 17 00:00:00 2001
From: Qyriad
Date: Mon, 20 May 2024 21:58:27 -0600
Subject: [PATCH 23/24] docs: linkify nix3-build mention in nix-build.md

Change-Id: I462a8cf0da42b5045ce84b48dc1841ecdccbb89e
---
 doc/manual/src/command-ref/nix-build.md | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/doc/manual/src/command-ref/nix-build.md b/doc/manual/src/command-ref/nix-build.md
index b548edf82..92e642ddd 100644
--- a/doc/manual/src/command-ref/nix-build.md
+++ b/doc/manual/src/command-ref/nix-build.md
@@ -14,9 +14,8 @@
 
 # Disambiguation
 
-This man page describes the command `nix-build`, which is distinct from `nix
-build`. For documentation on the latter, run `nix build --help` or see `man
-nix3-build`.
+This man page describes the command [`nix-build`](./new-cli/nix3-build.md), which is distinct from `nix build`.
+For documentation on the latter, run `nix build --help` or see `man nix3-build`.
 
 # Description
 
From f936eabbc381b61631e86a7e8ffe0fd7d4779dae Mon Sep 17 00:00:00 2001
From: Qyriad
Date: Mon, 20 May 2024 22:21:14 -0600
Subject: [PATCH 24/24] docs: document lookupFileArg syntax in nix-build

Change-Id: Ib6d68594a16132805ba5d97526e16f7b3633117e
---
 doc/manual/src/command-ref/nix-build.md | 38 ++++++++++++++++++++++---
 1 file changed, 34 insertions(+), 4 deletions(-)

diff --git a/doc/manual/src/command-ref/nix-build.md b/doc/manual/src/command-ref/nix-build.md
index 92e642ddd..5baa86e47 100644
--- a/doc/manual/src/command-ref/nix-build.md
+++ b/doc/manual/src/command-ref/nix-build.md
@@ -29,10 +29,40 @@ created (`result`, `result-2`, and so on). If no *paths* are specified, then
 `nix-build` will use `default.nix` in the current directory, if it
 exists.
 
-If an element of *paths* starts with `http://` or `https://`, it is
-interpreted as the URL of a tarball that will be downloaded and unpacked
-to a temporary location. The tarball must include a single top-level
-directory containing at least a file named `default.nix`.
+## Path Syntax
+
+A given element of *paths* may take one of a few different forms, the first being a simple filesystem path, e.g. `nix-build /tmp/some-file.nix`.
+It may also be a [search path](./env-common.html#env-NIX_PATH) like `<nixpkgs>`, which is convenient to use with `--attr`/`-A`:
+
+```console
+$ nix-build '<nixpkgs>' -A firefox
+```
+
+(Note the quotation marks around `<nixpkgs>`, which will be necessary in most Unix shells.)
+
+If an element of *paths* starts with `http://` or `https://`, it is interpreted as the URL of a tarball which will be fetched and unpacked.
+Lix will then `import` the unpacked directory, so these tarballs must include a single top-level directory with a file called `default.nix`.
+For example, you could build from a specific version of Nixpkgs with something like:
+
+```console
+$ nix-build "https://github.com/NixOS/nixpkgs/archive/refs/heads/release-23.11.tar.gz" -A firefox
+```
+
+If a path starts with `flake:`, the rest of the argument is interpreted as a [flakeref](./new-cli/nix3-flake.html#flake-references) (see `nix flake --help` or `man nix3-flake`), which requires the "flakes" experimental feature to be enabled.
+Lix will fetch the flake, and then `import` its unpacked directory, so the flake must include a file called `default.nix`.
+For example, the flake analogues to the above `nix-build` commands are:
+
+```console
+$ nix-build flake:nixpkgs -A firefox
+$ nix-build flake:github:NixOS/nixpkgs/release-23.11 -A firefox
+```
+
+Finally, for legacy reasons, if a path starts with `channel:`, the rest of the argument is interpreted as the name of a channel to fetch from `https://nixos.org/channels/$CHANNEL_NAME/nixexprs.tar.xz`.
+
+> **NOTE:** any of the special syntaxes may always be disambiguated by prefixing the path.
+> For example: a file in the current directory called `<nixpkgs>` can be addressed as `./<nixpkgs>`, to escape the special interpretation.
+
+## Notes
+
 `nix-build` is essentially a wrapper around
 [`nix-instantiate`](nix-instantiate.md) (to translate a high-level Nix