forked from lix-project/lix
Merge branch 'small-storePath-cleanups' into path-info
Commit adb3608034
.github/PULL_REQUEST_TEMPLATE.md (vendored, 2 changed lines)

@@ -21,7 +21,7 @@ Maintainers: tick if completed or explain if not relevant
 - [ ] tests, as appropriate
   - functional tests - `tests/**.sh`
   - unit tests - `src/*/tests`
-  - integration tests
+  - integration tests - `tests/nixos/*`
 - [ ] documentation in the manual
 - [ ] code and comments are self-explanatory
 - [ ] commit message explains why the change was made
.github/workflows/backport.yml (vendored, 2 changed lines)

@@ -21,7 +21,7 @@ jobs:
           fetch-depth: 0
       - name: Create backport PRs
         # should be kept in sync with `version`
-        uses: zeebe-io/backport-action@v1.0.1
+        uses: zeebe-io/backport-action@v1.1.0
         with:
           # Config README: https://github.com/zeebe-io/backport-action#backport-action
          github_token: ${{ secrets.GITHUB_TOKEN }}
boehmgc-coroutine-sp-fallback.diff (new file, 77 lines)

@@ -0,0 +1,77 @@
diff --git a/darwin_stop_world.c b/darwin_stop_world.c
index 3dbaa3fb..36a1d1f7 100644
--- a/darwin_stop_world.c
+++ b/darwin_stop_world.c
@@ -352,6 +352,7 @@ GC_INNER void GC_push_all_stacks(void)
   int nthreads = 0;
   word total_size = 0;
   mach_msg_type_number_t listcount = (mach_msg_type_number_t)THREAD_TABLE_SZ;
+  size_t stack_limit;
   if (!EXPECT(GC_thr_initialized, TRUE))
     GC_thr_init();

@@ -407,6 +408,19 @@ GC_INNER void GC_push_all_stacks(void)
         GC_push_all_stack_sections(lo, hi, p->traced_stack_sect);
       }
       if (altstack_lo) {
+        // When a thread goes into a coroutine, we lose its original sp until
+        // control flow returns to the thread.
+        // While in the coroutine, the sp points outside the thread stack,
+        // so we can detect this and push the entire thread stack instead,
+        // as an approximation.
+        // We assume that the coroutine has similarly added its entire stack.
+        // This could be made accurate by cooperating with the application
+        // via new functions and/or callbacks.
+        stack_limit = pthread_get_stacksize_np(p->id);
+        if (altstack_lo >= altstack_hi || altstack_lo < altstack_hi - stack_limit) { // sp outside stack
+          altstack_lo = altstack_hi - stack_limit;
+        }
+
         total_size += altstack_hi - altstack_lo;
         GC_push_all_stack(altstack_lo, altstack_hi);
       }
diff --git a/pthread_stop_world.c b/pthread_stop_world.c
index b5d71e62..aed7b0bf 100644
--- a/pthread_stop_world.c
+++ b/pthread_stop_world.c
@@ -768,6 +768,8 @@ STATIC void GC_restart_handler(int sig)
 /* world is stopped. Should not fail if it isn't. */
 GC_INNER void GC_push_all_stacks(void)
 {
+    size_t stack_limit;
+    pthread_attr_t pattr;
     GC_bool found_me = FALSE;
     size_t nthreads = 0;
     int i;
@@ -851,6 +853,31 @@ GC_INNER void GC_push_all_stacks(void)
         hi = p->altstack + p->altstack_size;
         /* FIXME: Need to scan the normal stack too, but how ? */
         /* FIXME: Assume stack grows down */
+      } else {
+        if (pthread_getattr_np(p->id, &pattr)) {
+          ABORT("GC_push_all_stacks: pthread_getattr_np failed!");
+        }
+        if (pthread_attr_getstacksize(&pattr, &stack_limit)) {
+          ABORT("GC_push_all_stacks: pthread_attr_getstacksize failed!");
+        }
+        if (pthread_attr_destroy(&pattr)) {
+          ABORT("GC_push_all_stacks: pthread_attr_destroy failed!");
+        }
+        // When a thread goes into a coroutine, we lose its original sp until
+        // control flow returns to the thread.
+        // While in the coroutine, the sp points outside the thread stack,
+        // so we can detect this and push the entire thread stack instead,
+        // as an approximation.
+        // We assume that the coroutine has similarly added its entire stack.
+        // This could be made accurate by cooperating with the application
+        // via new functions and/or callbacks.
+        #ifndef STACK_GROWS_UP
+        if (lo >= hi || lo < hi - stack_limit) { // sp outside stack
+          lo = hi - stack_limit;
+        }
+        #else
+        #error "STACK_GROWS_UP not supported in boost_coroutine2 (as of june 2021), so we don't support it in Nix."
+        #endif
       }
       GC_push_all_stack_sections(lo, hi, traced_stack_sect);
# ifdef STACK_GROWS_UP
@@ -82,8 +82,8 @@ paths. Realisation is a somewhat overloaded term:
 produced through substitutes. If there are no (successful)
 substitutes, realisation fails.

-[valid]: ../glossary.md#validity
-[substitutes]: ../glossary.md#substitute
+[valid]: ../glossary.md#gloss-validity
+[substitutes]: ../glossary.md#gloss-substitute

 The output path of each derivation is printed on standard output. (For
 non-derivations argument, the argument itself is printed.)
@@ -156,6 +156,8 @@
   to path `Q`, then `Q` is in the closure of `P`. Further, if `Q`
   references `R` then `R` is also in the closure of `P`.

+  [closure]: #gloss-closure
+
 - [output path]{#gloss-output-path}\
   A [store path] produced by a [derivation].

@@ -172,6 +174,8 @@
   - The store path is listed in the Nix database as being valid.
   - All paths in the store path's [closure] are valid.

+  [validity]: #gloss-validity
+
 - [user environment]{#gloss-user-env}\
   An automatically generated store object that consists of a set of
   symlinks to “active” applications, i.e., other store paths. These
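For a concrete sense of the closure relation described in this glossary entry, the full set of paths reachable from a store path can be listed from the command line. A hedged sketch (the store path is made up):

```shell
# List the closure of a store path: the path itself plus everything it
# references, transitively. The example path is hypothetical.
nix-store --query --requisites /nix/store/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-hello-2.12
```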
@@ -212,7 +212,7 @@ Derivations can declare some infrequently used optional attributes.
   If this **experimental** attribute is set to true, then the derivation
   outputs will be stored in a content-addressed location rather than the
   traditional input-addressed one.
-  This only has an effect if the `ca-derivation` experimental feature is enabled.
+  This only has an effect if the `ca-derivations` experimental feature is enabled.

   Setting this attribute also requires setting `outputHashMode` and `outputHashAlgo` like for *fixed-output derivations* (see above).
@@ -25,11 +25,11 @@
 * Allow explicitly selecting outputs in a store derivation installable, just like we can do with other sorts of installables.
   For example,
   ```shell-session
-  # nix-build /nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^dev
+  # nix build /nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^dev
   ```
   now works just as
   ```shell-session
-  # nix-build glibc^dev
+  # nix build nixpkgs#glibc^dev
   ```
   does already.
@@ -8,3 +8,15 @@
 discovered by making multiple syscalls. This change makes these operations
 lazy such that these lookups will only be performed if the attribute is used.
 This optimization affects a minority of filesystems and operating systems.
+
+* In derivations that use structured attributes, you can now use `unsafeDiscardReferences`
+  to disable scanning a given output for runtime dependencies:
+  ```nix
+  __structuredAttrs = true;
+  unsafeDiscardReferences.out = true;
+  ```
+  This is useful e.g. when generating self-contained filesystem images with
+  their own embedded Nix store: hashes found inside such an image refer
+  to the embedded store and not to the host's Nix store.
+
+  This requires the `discard-references` experimental feature.
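A hedged usage sketch of the feature described in this release note; it mirrors the `test11` case added to `tests/check-refs.nix` later in this commit and assumes a daemon with the `discard-references` experimental feature enabled:

```shell
# Build a structured-attrs derivation that sets unsafeDiscardReferences.out = true,
# then confirm that the store recorded no runtime references for the output.
nix-build --extra-experimental-features discard-references check-refs.nix -A test11
nix-store --query --references ./result
# (no output expected: reference scanning was skipped for this output)
```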
flake.nix (73 changed lines)

@@ -131,9 +131,14 @@
         });

       propagatedDeps =
-        [ (boehmgc.override {
+        [ ((boehmgc.override {
             enableLargeConfig = true;
-          })
+          }).overrideAttrs(o: {
+            patches = (o.patches or []) ++ [
+              ./boehmgc-coroutine-sp-fallback.diff
+            ];
+          })
+          )
           nlohmann_json
         ];
       };

@@ -404,6 +409,18 @@
         };
       };

+      nixos-lib = import (nixpkgs + "/nixos/lib") { };
+
+      # https://nixos.org/manual/nixos/unstable/index.html#sec-calling-nixos-tests
+      runNixOSTestFor = system: test: nixos-lib.runTest {
+        imports = [ test ];
+        hostPkgs = nixpkgsFor.${system};
+        defaults = {
+          nixpkgs.pkgs = nixpkgsFor.${system};
+        };
+        _module.args.nixpkgs = nixpkgs;
+      };
+
     in {

       # A Nixpkgs overlay that overrides the 'nix' and

@@ -460,6 +477,10 @@

           src = self;

+          configureFlags = [
+            "CXXFLAGS=-I${lib.getDev pkgs.rapidcheck}/extras/gtest/include"
+          ];
+
           enableParallelBuilding = true;

           nativeBuildInputs = nativeBuildDeps;

@@ -478,49 +499,22 @@
       };

       # System tests.
-      tests.remoteBuilds = import ./tests/remote-builds.nix {
-        system = "x86_64-linux";
-        inherit nixpkgs;
-        overlay = self.overlays.default;
-      };
+      tests.remoteBuilds = runNixOSTestFor "x86_64-linux" ./tests/nixos/remote-builds.nix;

-      tests.nix-copy-closure = import ./tests/nix-copy-closure.nix {
-        system = "x86_64-linux";
-        inherit nixpkgs;
-        overlay = self.overlays.default;
-      };
+      tests.nix-copy-closure = runNixOSTestFor "x86_64-linux" ./tests/nixos/nix-copy-closure.nix;

-      tests.nssPreload = (import ./tests/nss-preload.nix rec {
-        system = "x86_64-linux";
-        inherit nixpkgs;
-        overlay = self.overlays.default;
-      });
+      tests.nssPreload = runNixOSTestFor "x86_64-linux" ./tests/nixos/nss-preload.nix;

-      tests.githubFlakes = (import ./tests/github-flakes.nix rec {
-        system = "x86_64-linux";
-        inherit nixpkgs;
-        overlay = self.overlays.default;
-      });
+      tests.githubFlakes = runNixOSTestFor "x86_64-linux" ./tests/nixos/github-flakes.nix;

-      tests.sourcehutFlakes = (import ./tests/sourcehut-flakes.nix rec {
-        system = "x86_64-linux";
-        inherit nixpkgs;
-        overlay = self.overlays.default;
-      });
+      tests.sourcehutFlakes = runNixOSTestFor "x86_64-linux" ./tests/nixos/sourcehut-flakes.nix;

-      tests.containers = (import ./tests/containers.nix rec {
-        system = "x86_64-linux";
-        inherit nixpkgs;
-        overlay = self.overlays.default;
-      });
+      tests.containers = runNixOSTestFor "x86_64-linux" ./tests/nixos/containers/containers.nix;

       tests.setuid = nixpkgs.lib.genAttrs
         ["i686-linux" "x86_64-linux"]
-        (system:
-          import ./tests/setuid.nix rec {
-            inherit nixpkgs system;
-            overlay = self.overlays.default;
-          });
+        (system: runNixOSTestFor system ./tests/nixos/setuid.nix);


       # Make sure that nix-env still produces the exact same result
       # on a particular version of Nixpkgs.

@@ -653,6 +647,7 @@
         inherit system crossSystem;
         overlays = [ self.overlays.default ];
       };
+      inherit (nixpkgsCross) lib;
     in with commonDeps { pkgs = nixpkgsCross; }; nixpkgsCross.stdenv.mkDerivation {
       name = "nix-${version}";

@@ -665,7 +660,11 @@
       nativeBuildInputs = nativeBuildDeps;
       buildInputs = buildDeps ++ propagatedDeps;

-      configureFlags = [ "--sysconfdir=/etc" "--disable-doc-gen" ];
+      configureFlags = [
+        "CXXFLAGS=-I${lib.getDev nixpkgsCross.rapidcheck}/extras/gtest/include"
+        "--sysconfdir=/etc"
+        "--disable-doc-gen"
+      ];

       enableParallelBuilding = true;
@@ -519,6 +519,7 @@ EvalState::EvalState(
     static_assert(sizeof(Env) <= 16, "environment must be <= 16 bytes");

     /* Initialise the Nix expression search path. */
+    evalSettings.nixPath.setDefault(evalSettings.getDefaultNixPath());
     if (!evalSettings.pureEval) {
         for (auto & i : _searchPath) addToSearchPath(i);
         for (auto & i : evalSettings.nixPath.get()) addToSearchPath(i);

@@ -2472,31 +2473,36 @@ std::ostream & operator << (std::ostream & str, const ExternalValueBase & v) {

 EvalSettings::EvalSettings()
 {
-    auto var = getEnv("NIX_PATH");
-    if (var) nixPath = parseNixPath(*var);
 }

+/* impure => NIX_PATH or a default path
+ * restrict-eval => NIX_PATH
+ * pure-eval => empty
+ */
 Strings EvalSettings::getDefaultNixPath()
 {
-    Strings res;
-    auto add = [&](const Path & p, const std::string & s = std::string()) {
-        if (pathExists(p)) {
-            if (s.empty()) {
-                res.push_back(p);
-            } else {
-                res.push_back(s + "=" + p);
-            }
-        }
-    };
-
-    if (!evalSettings.restrictEval && !evalSettings.pureEval) {
-        add(getHome() + "/.nix-defexpr/channels");
-        add(settings.nixStateDir + "/profiles/per-user/root/channels/nixpkgs", "nixpkgs");
-        add(settings.nixStateDir + "/profiles/per-user/root/channels");
-    }
-
-    return res;
+    if (pureEval)
+        return {};
+
+    auto var = getEnv("NIX_PATH");
+    if (var) {
+        return parseNixPath(*var);
+    } else if (restrictEval) {
+        return {};
+    } else {
+        Strings res;
+        auto add = [&](const Path & p, const std::optional<std::string> & s = std::nullopt) {
+            if (pathExists(p))
+                res.push_back(s ? *s + "=" + p : p);
+        };
+
+        add(getHome() + "/.nix-defexpr/channels");
+        add(settings.nixStateDir + "/profiles/per-user/root/channels/nixpkgs", "nixpkgs");
+        add(settings.nixStateDir + "/profiles/per-user/root/channels");
+
+        return res;
+    }
 }

 bool EvalSettings::isPseudoUrl(std::string_view s)
 {
@@ -570,7 +570,7 @@ struct EvalSettings : Config
 {
     EvalSettings();

-    static Strings getDefaultNixPath();
+    Strings getDefaultNixPath();

     static bool isPseudoUrl(std::string_view s);
@@ -580,8 +580,15 @@ struct EvalSettings : Config
         "Whether builtin functions that allow executing native code should be enabled."};

     Setting<Strings> nixPath{
-        this, getDefaultNixPath(), "nix-path",
-        "List of directories to be searched for `<...>` file references."};
+        this, {}, "nix-path",
+        R"(
+          List of directories to be searched for `<...>` file references.
+
+          If [pure evaluation](#conf-pure-eval) is disabled,
+          this is initialised using the [`NIX_PATH`](@docroot@/command-ref/env-common.md#env-NIX_PATH)
+          environment variable, or, if it is unset and [restricted evaluation](#conf-restrict-eval)
+          is disabled, a default search path including the user's and `root`'s channels.
+        )"};

     Setting<bool> restrictEval{
         this, false, "restrict-eval",
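The tests touched later in this commit (`tests/nix_path.sh`, `tests/restricted.sh`) exercise exactly this policy; a condensed sketch of the same checks:

```shell
# nix-path (or NIX_PATH) resolves <...> lookups when pure evaluation is off:
NIX_PATH= nix-instantiate --option nix-path by-relative-path=. --find-file by-relative-path/simple.nix
# Under restricted evaluation with NIX_PATH unset there is no default search
# path, so the same kind of lookup is expected to fail:
(unset NIX_PATH; ! nix-instantiate --restrict-eval --find-file .)
```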
@@ -1517,7 +1517,7 @@ void LocalDerivationGoal::startDaemon()
             try {
                 daemon::processConnection(store, from, to,
                     daemon::NotTrusted, daemon::Recursive,
-                    [&](Store & store) { store.createUser("nobody", 65535); });
+                    [&](Store & store) {});
                 debug("terminated daemon connection");
             } catch (SysError &) {
                 ignoreException();
@@ -2323,11 +2323,28 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
             buildUser ? std::optional(buildUser->getUIDRange()) : std::nullopt,
             inodesSeen);

+        bool discardReferences = false;
+        if (auto structuredAttrs = parsedDrv->getStructuredAttrs()) {
+            if (auto udr = get(*structuredAttrs, "unsafeDiscardReferences")) {
+                settings.requireExperimentalFeature(Xp::DiscardReferences);
+                if (auto output = get(*udr, outputName)) {
+                    if (!output->is_boolean())
+                        throw Error("attribute 'unsafeDiscardReferences.\"%s\"' of derivation '%s' must be a Boolean", outputName, drvPath.to_string());
+                    discardReferences = output->get<bool>();
+                }
+            }
+        }
+
+        StorePathSet references;
+        if (discardReferences)
+            debug("discarding references of output '%s'", outputName);
+        else {
            debug("scanning for references for output '%s' in temp location '%s'", outputName, actualPath);

            /* Pass blank Sink as we are not ready to hash data at this stage. */
            NullSink blank;
-            auto references = scanForReferences(blank, actualPath, referenceablePaths);
+            references = scanForReferences(blank, actualPath, referenceablePaths);
+        }

         outputReferencesIfUnregistered.insert_or_assign(
             outputName,
@@ -222,7 +222,8 @@ struct ClientSettings
                 else if (!hasSuffix(s, "/") && trusted.count(s + "/"))
                     subs.push_back(s + "/");
                 else
-                    warn("ignoring untrusted substituter '%s'", s);
+                    warn("ignoring untrusted substituter '%s', you are not a trusted user.\n"
+                         "Run `man nix.conf` for more information on the `substituters` configuration option.", s);
             res = subs;
             return true;
         };
@@ -570,11 +570,15 @@ public:
         {"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="},
         "trusted-public-keys",
         R"(
-          A whitespace-separated list of public keys. When paths are copied
-          from another Nix store (such as a binary cache), they must be
-          signed with one of these keys. For example:
-          `cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=
-          hydra.nixos.org-1:CNHJZBh9K4tP3EKF6FkkgeVYsS3ohTl+oS0Qa8bezVs=`.
+          A whitespace-separated list of public keys.
+
+          At least one of the following condition must be met
+          for Nix to accept copying a store object from another
+          Nix store (such as a substituter):
+
+          - the store object has been signed using a key in the trusted keys list
+          - the [`require-sigs`](#conf-require-sigs) option has been set to `false`
+          - the store object is [output-addressed](@docroot@/glossary.md#gloss-output-addressed-store-object)
         )",
         {"binary-cache-public-keys"}};
@@ -670,13 +674,14 @@ public:
           independently. Lower value means higher priority.
           The default is `https://cache.nixos.org`, with a Priority of 40.

-          Nix will copy a store path from a remote store only if one
-          of the following is true:
+          At least one of the following conditions must be met for Nix to use
+          a substituter:

           - the store object is signed by one of the [`trusted-public-keys`](#conf-trusted-public-keys)
           - the substituter is in the [`trusted-substituters`](#conf-trusted-substituters) list
           - the [`require-sigs`](#conf-require-sigs) option has been set to `false`
           - the store object is [output-addressed](@docroot@/glossary.md#gloss-output-addressed-store-object)
+          - the user calling Nix is in the [`trusted-users`](#conf-trusted-users) list

           In addition, each store path should be trusted as described
           in [`trusted-public-keys`](#conf-trusted-public-keys)
         )",
         {"binary-caches"}};
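For illustration, a hedged example of how these two settings interact when passed on the command line; the key shown for cache.nixos.org is the default one quoted above, everything else is a placeholder:

```shell
# Use only the default cache as a substituter, trusting its published key.
# Whether this is honoured depends on the trust conditions listed above.
nix-build default.nix \
  --option substituters "https://cache.nixos.org" \
  --option trusted-public-keys "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
```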
@@ -201,8 +201,6 @@ LocalStore::LocalStore(const Params & params)
             throw SysError("could not set permissions on '%s' to 755", perUserDir);
     }

-    createUser(getUserName(), getuid());
-
     /* Optionally, create directories and set permissions for a
        multi-user install. */
     if (getuid() == 0 && settings.buildUsersGroup != "") {
@@ -1844,20 +1842,6 @@ void LocalStore::signPathInfo(ValidPathInfo & info)
 }


-void LocalStore::createUser(const std::string & userName, uid_t userId)
-{
-    for (auto & dir : {
-        fmt("%s/profiles/per-user/%s", stateDir, userName),
-        fmt("%s/gcroots/per-user/%s", stateDir, userName)
-    }) {
-        createDirs(dir);
-        if (chmod(dir.c_str(), 0755) == -1)
-            throw SysError("changing permissions of directory '%s'", dir);
-        if (chown(dir.c_str(), userId, getgid()) == -1)
-            throw SysError("changing owner of directory '%s'", dir);
-    }
-}
-
 std::optional<std::pair<int64_t, Realisation>> LocalStore::queryRealisationCore_(
     LocalStore::State & state,
     const DrvOutput & id)
@@ -281,8 +281,6 @@ private:
     void signPathInfo(ValidPathInfo & info);
     void signRealisation(Realisation &);

-    void createUser(const std::string & userName, uid_t userId) override;
-
     // XXX: Make a generic `Store` method
     FixedOutputHash hashCAPath(
         const FileIngestionMethod & method,
@@ -280,16 +280,24 @@ std::string optimisticLockProfile(const Path & profile)
 }


+Path profilesDir()
+{
+    auto profileRoot = getDataDir() + "/nix/profiles";
+    createDirs(profileRoot);
+    return profileRoot;
+}
+
+
 Path getDefaultProfile()
 {
     Path profileLink = getHome() + "/.nix-profile";
     try {
+        auto profile =
+            getuid() == 0
+            ? settings.nixStateDir + "/profiles/default"
+            : profilesDir() + "/profile";
         if (!pathExists(profileLink)) {
-            replaceSymlink(
-                getuid() == 0
-                ? settings.nixStateDir + "/profiles/default"
-                : fmt("%s/profiles/per-user/%s/profile", settings.nixStateDir, getUserName()),
-                profileLink);
+            replaceSymlink(profile, profileLink);
         }
         return absPath(readLink(profileLink), dirOf(profileLink));
     } catch (Error &) {
@@ -68,6 +68,10 @@ void lockProfile(PathLocks & lock, const Path & profile);
    rebuilt. */
 std::string optimisticLockProfile(const Path & profile);

+/* Creates and returns the path to a directory suitable for storing the user’s
+   profiles. */
+Path profilesDir();
+
 /* Resolve ~/.nix-profile. If ~/.nix-profile doesn't exist yet, create
    it. */
 Path getDefaultProfile();
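Concretely, for a non-root user this moves the profile tree from `/nix/var/nix/profiles/per-user/$USER` into the XDG data directory; the migration test added later in this commit checks the same layout. A hedged sketch, assuming a hypothetical user "alice" with `XDG_DATA_HOME` unset:

```shell
# After this change, on a fresh non-root setup:
readlink ~/.nix-profile          # -> /home/alice/.local/share/nix/profiles/profile
ls ~/.local/share/nix/profiles   # -> profile, profile-1-link, ...
```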
@@ -653,9 +653,6 @@ public:
         return toRealPath(printStorePath(storePath));
     }

-    virtual void createUser(const std::string & userName, uid_t userId)
-    { }
-
     /*
      * Synchronises the options of the client with those of the daemon
      * (a no-op when there’s no daemon)
@@ -74,6 +74,8 @@ struct AbstractPos
     virtual void print(std::ostream & out) const = 0;

     std::optional<LinesOfCode> getCodeLines() const;
+
+    virtual ~AbstractPos() = default;
 };

 std::ostream & operator << (std::ostream & str, const AbstractPos & pos);
@@ -16,6 +16,7 @@ std::map<ExperimentalFeature, std::string> stringifiedXpFeatures = {
     { Xp::ReplFlake, "repl-flake" },
     { Xp::AutoAllocateUids, "auto-allocate-uids" },
     { Xp::Cgroups, "cgroups" },
+    { Xp::DiscardReferences, "discard-references" },
 };

 const std::optional<ExperimentalFeature> parseExperimentalFeature(const std::string_view & name)
@@ -25,6 +25,7 @@ enum struct ExperimentalFeature
     ReplFlake,
     AutoAllocateUids,
     Cgroups,
+    DiscardReferences,
 };

 /**
@@ -99,6 +99,27 @@ namespace nix {
         ASSERT_EQ(parsed, expected);
     }

+    TEST(parseURL, parsesFilePlusHttpsUrl) {
+        auto s = "file+https://www.example.org/video.mp4";
+        auto parsed = parseURL(s);
+
+        ParsedURL expected {
+            .url = "file+https://www.example.org/video.mp4",
+            .base = "https://www.example.org/video.mp4",
+            .scheme = "file+https",
+            .authority = "www.example.org",
+            .path = "/video.mp4",
+            .query = (StringMap) { },
+            .fragment = "",
+        };
+
+        ASSERT_EQ(parsed, expected);
+    }
+
+    TEST(parseURL, rejectsAuthorityInUrlsWithFileTransportation) {
+        auto s = "file://www.example.org/video.mp4";
+        ASSERT_THROW(parseURL(s), Error);
+    }
+
     TEST(parseURL, parseIPv4Address) {
         auto s = "http://127.0.0.1:8080/file.tar.gz?download=fast&when=now#hello";
@@ -30,13 +30,13 @@ ParsedURL parseURL(const std::string & url)
         auto & query = match[6];
         auto & fragment = match[7];

-        auto isFile = scheme.find("file") != std::string::npos;
+        auto transportIsFile = parseUrlScheme(scheme).transport == "file";

-        if (authority && *authority != "" && isFile)
+        if (authority && *authority != "" && transportIsFile)
             throw BadURL("file:// URL '%s' has unexpected authority '%s'",
                 url, *authority);

-        if (isFile && path.empty())
+        if (transportIsFile && path.empty())
             path = "/";

         return ParsedURL{
@@ -537,6 +537,16 @@ std::string getUserName()
     return name;
 }

+Path getHomeOf(uid_t userId)
+{
+    std::vector<char> buf(16384);
+    struct passwd pwbuf;
+    struct passwd * pw;
+    if (getpwuid_r(userId, &pwbuf, buf.data(), buf.size(), &pw) != 0
+        || !pw || !pw->pw_dir || !pw->pw_dir[0])
+        throw Error("cannot determine user's home directory");
+    return pw->pw_dir;
+}
+
 Path getHome()
 {
@@ -558,13 +568,7 @@ Path getHome()
         }
     }
     if (!homeDir) {
-        std::vector<char> buf(16384);
-        struct passwd pwbuf;
-        struct passwd * pw;
-        if (getpwuid_r(geteuid(), &pwbuf, buf.data(), buf.size(), &pw) != 0
-            || !pw || !pw->pw_dir || !pw->pw_dir[0])
-            throw Error("cannot determine user's home directory");
-        homeDir = pw->pw_dir;
+        homeDir = getHomeOf(geteuid());
         if (unownedUserHomeDir.has_value() && unownedUserHomeDir != homeDir) {
             warn("$HOME ('%s') is not owned by you, falling back to the one defined in the 'passwd' file ('%s')", *unownedUserHomeDir, *homeDir);
         }
@@ -137,6 +137,9 @@ void deletePath(const Path & path, uint64_t & bytesFreed);

 std::string getUserName();

+/* Return the given user's home directory from /etc/passwd. */
+Path getHomeOf(uid_t userId);
+
 /* Return $HOME or the user's home directory from /etc/passwd. */
 Path getHome();
@@ -1,9 +1,11 @@
+#include "profiles.hh"
 #include "shared.hh"
 #include "globals.hh"
 #include "filetransfer.hh"
 #include "store-api.hh"
 #include "legacy.hh"
 #include "fetchers.hh"
+#include "util.hh"

 #include <fcntl.h>
 #include <regex>

@@ -166,7 +168,7 @@ static int main_nix_channel(int argc, char ** argv)
     nixDefExpr = home + "/.nix-defexpr";

     // Figure out the name of the channels profile.
-    profile = fmt("%s/profiles/per-user/%s/channels", settings.nixStateDir, getUserName());
+    profile = profilesDir() + "/channels";

     enum {
         cNone,
@@ -248,7 +248,6 @@ static void daemonLoop()
                     querySetting("build-users-group", "") == "")
                     throw Error("if you run 'nix-daemon' as root, then you MUST set 'build-users-group'!");
 #endif
-                store.createUser(user, peer.uid);
             });

             exit(0);
@@ -966,6 +966,7 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun
 struct CmdFlakeShow : FlakeCommand, MixJSON
 {
     bool showLegacy = false;
+    bool showAllSystems = false;

     CmdFlakeShow()
     {

@@ -974,6 +975,11 @@ struct CmdFlakeShow : FlakeCommand, MixJSON
             .description = "Show the contents of the `legacyPackages` output.",
             .handler = {&showLegacy, true}
         });
+        addFlag({
+            .longName = "all-systems",
+            .description = "Show the contents of outputs for all systems.",
+            .handler = {&showAllSystems, true}
+        });
     }

     std::string description() override

@@ -994,6 +1000,7 @@ struct CmdFlakeShow : FlakeCommand, MixJSON

         auto state = getEvalState();
         auto flake = std::make_shared<LockedFlake>(lockFlake());
+        auto localSystem = std::string(settings.thisSystem.get());

         std::function<nlohmann::json(
             eval_cache::AttrCursor & visitor,

@@ -1084,11 +1091,19 @@ struct CmdFlakeShow : FlakeCommand, MixJSON
                 || (attrPath.size() == 3 && (attrPathS[0] == "checks" || attrPathS[0] == "packages" || attrPathS[0] == "devShells"))
                 )
             {
+                if (!showAllSystems && std::string(attrPathS[1]) != localSystem) {
+                    if (!json)
+                        logger->cout(fmt("%s " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--all-systems' to show)", headerPrefix));
+                    else {
+                        logger->warn(fmt("%s omitted (use '--all-systems' to show)", concatStringsSep(".", attrPathS)));
+                    }
+                } else {
                     if (visitor.isDerivation())
                         showDerivation();
                     else
                         throw Error("expected a derivation");
+                }
             }

             else if (attrPath.size() > 0 && attrPathS[0] == "hydraJobs") {
                 if (visitor.isDerivation())

@@ -1106,6 +1121,12 @@ struct CmdFlakeShow : FlakeCommand, MixJSON
                 else {
                     logger->warn(fmt("%s omitted (use '--legacy' to show)", concatStringsSep(".", attrPathS)));
                 }
+            } else if (!showAllSystems && std::string(attrPathS[1]) != localSystem) {
+                if (!json)
+                    logger->cout(fmt("%s " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--all-systems' to show)", headerPrefix));
+                else {
+                    logger->warn(fmt("%s omitted (use '--all-systems' to show)", concatStringsSep(".", attrPathS)));
+                }
             } else {
                 if (visitor.isDerivation())
                     showDerivation();
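A hedged sketch of the new flag from the command line; `tests/flakes/show.sh`, added later in this commit, checks the same behaviour through `--json`. The flake reference below is a placeholder:

```shell
# Default: packages for systems other than the current one are reported as "omitted".
nix flake show github:example/flake
# Opt back in to the exhaustive, all-systems listing:
nix flake show --all-systems github:example/flake
```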
@@ -9,15 +9,44 @@ using namespace nix;

 struct CmdShowConfig : Command, MixJSON
 {
+    std::optional<std::string> name;
+
+    CmdShowConfig() {
+        expectArgs({
+            .label = {"name"},
+            .optional = true,
+            .handler = {&name},
+        });
+    }
+
     std::string description() override
     {
-        return "show the Nix configuration";
+        return "show the Nix configuration or the value of a specific setting";
     }

     Category category() override { return catUtility; }

     void run() override
     {
+        if (name) {
+            if (json) {
+                throw UsageError("'--json' is not supported when specifying a setting name");
+            }
+
+            std::map<std::string, Config::SettingInfo> settings;
+            globalConfig.getSettings(settings);
+            auto setting = settings.find(*name);
+
+            if (setting == settings.end()) {
+                throw Error("could not find setting '%1%'", *name);
+            } else {
+                const auto & value = setting->second.value;
+                logger->cout("%s", value);
+            }
+
+            return;
+        }
+
         if (json) {
             // FIXME: use appropriate JSON types (bool, ints, etc).
             logger->cout("%s", globalConfig.toJSON().dump());
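The corresponding CLI behaviour, sketched below; the same check is added to `tests/config.sh` later in this commit, and `warn-dirty` is just one example of a setting name:

```shell
# Print the whole configuration, as before:
nix show-config
# Print a single setting's value (new in this change):
nix show-config warn-dirty
```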
@@ -67,4 +67,11 @@ rec {
     disallowedReferences = [test5];
   };

+  test11 = makeTest 11 {
+    __structuredAttrs = true;
+    unsafeDiscardReferences.out = true;
+    outputChecks.out.allowedReferences = [];
+    buildCommand = ''echo ${dep} > "''${outputs[out]}"'';
+  };
+
 }
@@ -40,3 +40,12 @@ nix-build -o $RESULT check-refs.nix -A test7

 # test10 should succeed (no disallowed references).
 nix-build -o $RESULT check-refs.nix -A test10
+
+if isDaemonNewer 2.12pre20230103; then
+    enableFeatures discard-references
+    restartDaemon
+
+    # test11 should succeed.
+    test11=$(nix-build -o $RESULT check-refs.nix -A test11)
+    [[ -z $(nix-store -q --references "$test11") ]]
+fi
@@ -62,7 +62,7 @@ readLink() {
 }

 clearProfiles() {
-    profiles="$NIX_STATE_DIR"/profiles
+    profiles="$HOME"/.local/share/nix/profiles
     rm -rf $profiles
 }
@@ -51,3 +51,8 @@ exp_features=$(nix show-config | grep '^experimental-features' | cut -d '=' -f 2
 [[ $prev != $exp_cores ]]
 [[ $exp_cores == "4242" ]]
 [[ $exp_features == "flakes nix-command" ]]
+
+# Test that it's possible to retrieve a single setting's value
+val=$(nix show-config | grep '^warn-dirty' | cut -d '=' -f 2 | xargs)
+val2=$(nix show-config warn-dirty)
+[[ $val == $val2 ]]
@@ -4,7 +4,7 @@ clearStore
 clearProfiles

 checkRef() {
-    nix-store -q --references $TEST_ROOT/result | grep -q "$1" || fail "missing reference $1"
+    nix-store -q --references $TEST_ROOT/result | grep -q "$1"'$' || fail "missing reference $1"
 }

 # Test the export of the runtime dependency graph.
@@ -20,9 +20,13 @@ writeSimpleFlake() {
         foo = import ./simple.nix;
         default = foo;
       };
+      packages.someOtherSystem = rec {
+        foo = import ./simple.nix;
+        default = foo;
+      };

       # To test "nix flake init".
-      legacyPackages.x86_64-linux.hello = import ./simple.nix;
+      legacyPackages.$system.hello = import ./simple.nix;
     };
 }
 EOF
@@ -41,8 +41,8 @@ cat > $templatesDir/trivial/flake.nix <<EOF
   description = "A flake for building Hello World";

   outputs = { self, nixpkgs }: {
-    packages.x86_64-linux = rec {
-      hello = nixpkgs.legacyPackages.x86_64-linux.hello;
+    packages.$system = rec {
+      hello = nixpkgs.legacyPackages.$system.hello;
       default = hello;
     };
   };
tests/flakes/show.sh (new file, 39 lines)

@@ -0,0 +1,39 @@
source ./common.sh

flakeDir=$TEST_ROOT/flake
mkdir -p "$flakeDir"

writeSimpleFlake "$flakeDir"
cd "$flakeDir"


# By default: Only show the packages content for the current system and no
# legacyPackages at all
nix flake show --json > show-output.json
nix eval --impure --expr '
let show_output = builtins.fromJSON (builtins.readFile ./show-output.json);
in
assert show_output.packages.someOtherSystem.default == {};
assert show_output.packages.${builtins.currentSystem}.default.name == "simple";
assert show_output.legacyPackages.${builtins.currentSystem} == {};
true
'

# With `--all-systems`, show the packages for all systems
nix flake show --json --all-systems > show-output.json
nix eval --impure --expr '
let show_output = builtins.fromJSON (builtins.readFile ./show-output.json);
in
assert show_output.packages.someOtherSystem.default.name == "simple";
assert show_output.legacyPackages.${builtins.currentSystem} == {};
true
'

# With `--legacy`, show the legacy packages
nix flake show --json --legacy > show-output.json
nix eval --impure --expr '
let show_output = builtins.fromJSON (builtins.readFile ./show-output.json);
in
assert show_output.legacyPackages.${builtins.currentSystem}.hello.name == "simple";
true
'
@@ -17,6 +17,7 @@ nix_tests = \
   fetchMercurial.sh \
   gc-auto.sh \
   user-envs.sh \
+  user-envs-migration.sh \
   binary-cache.sh \
   multiple-outputs.sh \
   ca/build.sh \

@@ -113,6 +114,7 @@ nix_tests = \
   store-ping.sh \
   fetchClosure.sh \
   completions.sh \
+  flakes/show.sh \
   impure-derivations.sh \
   path-from-hash-part.sh \
   toString-path.sh
@@ -12,3 +12,8 @@ nix-instantiate --eval -E '<by-relative-path/simple.nix>' --restrict-eval

 [[ $(nix-instantiate --find-file by-absolute-path/simple.nix) = $PWD/simple.nix ]]
 [[ $(nix-instantiate --find-file by-relative-path/simple.nix) = $PWD/simple.nix ]]
+
+unset NIX_PATH
+
+[[ $(nix-instantiate --option nix-path by-relative-path=. --find-file by-relative-path/simple.nix) = "$PWD/simple.nix" ]]
+[[ $(NIX_PATH= nix-instantiate --option nix-path by-relative-path=. --find-file by-relative-path/simple.nix) = "$PWD/simple.nix" ]]
@@ -1,12 +1,7 @@
 # Test whether we can run a NixOS container inside a Nix build using systemd-nspawn.
-{ nixpkgs, system, overlay }:
+{ lib, nixpkgs, ... }:

-with import (nixpkgs + "/nixos/lib/testing-python.nix") {
-  inherit system;
-  extraConfigurations = [ { nixpkgs.overlays = [ overlay ]; } ];
-};
-
-makeTest ({
+{
   name = "containers";

   nodes =

@@ -65,4 +60,4 @@
     host.succeed("[[ $(cat ./result/msg) = 'Hello World' ]]")
   '';

-})
+}
@@ -1,14 +1,9 @@
-{ nixpkgs, system, overlay }:
-
-with import (nixpkgs + "/nixos/lib/testing-python.nix") {
-  inherit system;
-  extraConfigurations = [ { nixpkgs.overlays = [ overlay ]; } ];
-};
-
+{ lib, config, nixpkgs, ... }:
 let
+  pkgs = config.nodes.client.nixpkgs.pkgs;
+
   # Generate a fake root CA and a fake api.github.com / github.com / channels.nixos.org certificate.
-  cert = pkgs.runCommand "cert" { buildInputs = [ pkgs.openssl ]; }
+  cert = pkgs.runCommand "cert" { nativeBuildInputs = [ pkgs.openssl ]; }
     ''
       mkdir -p $out

@@ -92,8 +87,6 @@ let
     '';
 in

-makeTest (
-
 {
   name = "github-flakes";

@@ -207,4 +200,4 @@ makeTest (
       client.succeed("nix build nixpkgs#fuse --tarball-ttl 0")
     '';

-})
+}
@@ -1,13 +1,16 @@
 # Test ‘nix-copy-closure’.

-{ nixpkgs, system, overlay }:
+{ lib, config, nixpkgs, hostPkgs, ... }:

-with import (nixpkgs + "/nixos/lib/testing-python.nix") {
-  inherit system;
-  extraConfigurations = [ { nixpkgs.overlays = [ overlay ]; } ];
-};
+let
+  pkgs = config.nodes.client.nixpkgs.pkgs;

-makeTest (let pkgA = pkgs.cowsay; pkgB = pkgs.wget; pkgC = pkgs.hello; pkgD = pkgs.tmux; in {
+  pkgA = pkgs.cowsay;
+  pkgB = pkgs.wget;
+  pkgC = pkgs.hello;
+  pkgD = pkgs.tmux;
+
+in {
   name = "nix-copy-closure";

   nodes =

@@ -74,4 +77,4 @@ makeTest (let pkgA = pkgs.cowsay; pkgB = pkgs.wget; pkgC = pkgs.hello; pkgD = pk
   # )
   # client.succeed("nix-store --check-validity ${pkgC}")
   '';
-})
+}
@@ -1,11 +1,9 @@
-{ nixpkgs, system, overlay }:
-
-with import (nixpkgs + "/nixos/lib/testing-python.nix") {
-  inherit system;
-  extraConfigurations = [ { nixpkgs.overlays = [ overlay ]; } ];
-};
+{ lib, config, nixpkgs, ... }:

 let
+  pkgs = config.nodes.client.nixpkgs.pkgs;

   nix-fetch = pkgs.writeText "fetch.nix" ''
     derivation {
       # This derivation is an copy from what is available over at

@@ -41,9 +39,7 @@ let
   '';
 in

-makeTest (
-
-rec {
+{
   name = "nss-preload";

   nodes = {

@@ -122,4 +118,4 @@ rec {
         nix-build ${nix-fetch} >&2
       """)
   '';
-})
+}
|
@ -1,15 +1,9 @@
|
|||
# Test Nix's remote build feature.
|
||||
|
||||
{ nixpkgs, system, overlay }:
|
||||
|
||||
with import (nixpkgs + "/nixos/lib/testing-python.nix") {
|
||||
inherit system;
|
||||
extraConfigurations = [ { nixpkgs.overlays = [ overlay ]; } ];
|
||||
};
|
||||
|
||||
makeTest (
|
||||
{ config, lib, hostPkgs, ... }:
|
||||
|
||||
let
|
||||
pkgs = config.nodes.client.nixpkgs.pkgs;
|
||||
|
||||
# The configuration of the remote builders.
|
||||
builder =
|
||||
|
@ -75,7 +69,7 @@ in
|
|||
|
||||
# Create an SSH key on the client.
|
||||
subprocess.run([
|
||||
"${pkgs.openssh}/bin/ssh-keygen", "-t", "ed25519", "-f", "key", "-N", ""
|
||||
"${hostPkgs.openssh}/bin/ssh-keygen", "-t", "ed25519", "-f", "key", "-N", ""
|
||||
], capture_output=True, check=True)
|
||||
client.succeed("mkdir -p -m 700 /root/.ssh")
|
||||
client.copy_from_host("key", "/root/.ssh/id_ed25519")
|
||||
|
@ -109,4 +103,4 @@ in
|
|||
builder1.block()
|
||||
client.succeed("nix-build ${expr nodes.client.config 4}")
|
||||
'';
|
||||
})
|
||||
}
|
|
@@ -1,13 +1,12 @@
 # Verify that Linux builds cannot create setuid or setgid binaries.

-{ nixpkgs, system, overlay }:
+{ lib, config, nixpkgs, ... }:

-with import (nixpkgs + "/nixos/lib/testing-python.nix") {
-  inherit system;
-  extraConfigurations = [ { nixpkgs.overlays = [ overlay ]; } ];
-};
+let
+  pkgs = config.nodes.machine.nixpkgs.pkgs;

-makeTest {
+in
+{
   name = "setuid";

   nodes.machine =
@@ -1,12 +1,8 @@
-{ nixpkgs, system, overlay }:
-
-with import (nixpkgs + "/nixos/lib/testing-python.nix")
-{
-  inherit system;
-  extraConfigurations = [{ nixpkgs.overlays = [ overlay ]; }];
-};
+{ lib, config, hostPkgs, nixpkgs, ... }:

 let
+  pkgs = config.nodes.sourcehut.nixpkgs.pkgs;

   # Generate a fake root CA and a fake git.sr.ht certificate.
   cert = pkgs.runCommand "cert" { buildInputs = [ pkgs.openssl ]; }
     ''

@@ -64,8 +60,6 @@ let

 in

-makeTest (
-
 {
   name = "sourcehut-flakes";

@@ -164,4 +158,4 @@ makeTest (
       client.succeed("nix build nixpkgs#fuse --tarball-ttl 0")
     '';

-})
+}
|
@ -30,7 +30,3 @@ NIX_REMOTE= nix-store --dump-db > $TEST_ROOT/d2
|
|||
cmp $TEST_ROOT/d1 $TEST_ROOT/d2
|
||||
|
||||
killDaemon
|
||||
|
||||
user=$(whoami)
|
||||
[ -e $NIX_STATE_DIR/gcroots/per-user/$user ]
|
||||
[ -e $NIX_STATE_DIR/profiles/per-user/$user ]
|
||||
|
|
|
@@ -17,6 +17,9 @@ nix-instantiate --restrict-eval --eval -E 'builtins.readDir ../src/nix-channel'
 (! nix-instantiate --restrict-eval --eval -E 'let __nixPath = [ { prefix = "foo"; path = ./.; } ]; in <foo>')
 nix-instantiate --restrict-eval --eval -E 'let __nixPath = [ { prefix = "foo"; path = ./.; } ]; in <foo>' -I src=.

+# no default NIX_PATH
+(unset NIX_PATH; ! nix-instantiate --restrict-eval --find-file .)
+
 p=$(nix eval --raw --expr "builtins.fetchurl file://$(pwd)/restricted.sh" --impure --restrict-eval --allowed-uris "file://$(pwd)")
 cmp $p restricted.sh
tests/user-envs-migration.sh (new file, 35 lines)

@@ -0,0 +1,35 @@
# Test that the migration of user environments
# (https://github.com/NixOS/nix/pull/5226) does preserve everything

source common.sh

if isDaemonNewer "2.4pre20211005"; then
    exit 99
fi


killDaemon
unset NIX_REMOTE

clearStore
clearProfiles
rm -rf ~/.nix-profile

# Fill the environment using the older Nix
PATH_WITH_NEW_NIX="$PATH"
export PATH="$NIX_DAEMON_PACKAGE/bin:$PATH"

nix-env -f user-envs.nix -i foo-1.0
nix-env -f user-envs.nix -i bar-0.1

# Migrate to the new profile dir, and ensure that everything’s there
export PATH="$PATH_WITH_NEW_NIX"
nix-env -q # Trigger the migration
( [[ -L ~/.nix-profile ]] && \
    [[ $(readlink ~/.nix-profile) == ~/.local/share/nix/profiles/profile ]] ) || \
    fail "The nix profile should point to the new location"

(nix-env -q | grep foo && nix-env -q | grep bar && \
    [[ -e ~/.nix-profile/bin/foo ]] && \
    [[ $(nix-env --list-generations | wc -l) == 2 ]]) ||
    fail "The nix profile should have the same content as before the migration"