Merge branch 'restore-test-build-remote-ca-fixed' into trustless-remote-builder-simple

John Ericson 2021-02-27 03:53:22 +00:00
commit e547fe12d1
43 changed files with 3627 additions and 3007 deletions

View file

@ -8,10 +8,52 @@ jobs:
matrix:
os: [ubuntu-latest, macos-latest]
runs-on: ${{ matrix.os }}
env:
CACHIX_NAME: nix-ci
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v2.3.4
with:
fetch-depth: 0
- uses: cachix/install-nix-action@v12
- uses: cachix/cachix-action@v8
with:
name: '${{ env.CACHIX_NAME }}'
signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
#- run: nix flake check
- run: nix-build -A checks.$(if [[ `uname` = Linux ]]; then echo x86_64-linux; else echo x86_64-darwin; fi)
installer:
if: github.event_name == 'push'
needs: tests
runs-on: ubuntu-latest
env:
CACHIX_NAME: nix-ci
outputs:
installerURL: ${{ steps.prepare-installer.outputs.installerURL }}
steps:
- uses: actions/checkout@v2.3.4
with:
fetch-depth: 0
- uses: cachix/install-nix-action@v12
- uses: cachix/cachix-action@v8
with:
name: '${{ env.CACHIX_NAME }}'
signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
- id: prepare-installer
run: scripts/prepare-installer-for-github-actions
installer_test:
if: github.event_name == 'push'
needs: installer
strategy:
matrix:
os: [ubuntu-latest, macos-latest]
runs-on: ${{ matrix.os }}
env:
CACHIX_NAME: nix-ci
steps:
- uses: actions/checkout@v2.3.4
- uses: cachix/install-nix-action@master
with:
install_url: '${{needs.installer.outputs.installerURL}}'
install_options: '--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve'
- run: nix-instantiate -E 'builtins.currentTime' --eval

View file

@ -2,11 +2,11 @@
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1602702596,
"narHash": "sha256-fqJ4UgOb4ZUnCDIapDb4gCrtAah5Rnr2/At3IzMitig=",
"lastModified": 1614309161,
"narHash": "sha256-93kRxDPyEW9QIpxU71kCaV1r+hgOgP6/aVgC7vvO8IU=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "ad0d20345219790533ebe06571f82ed6b034db31",
"rev": "0e499fde7af3c28d63e9b13636716b86c3162b93",
"type": "github"
},
"original": {

View file

@ -110,6 +110,40 @@
];
};
installScriptFor = systems:
with nixpkgsFor.x86_64-linux;
runCommand "installer-script"
{ buildInputs = [ nix ];
}
''
mkdir -p $out/nix-support
# Converts /nix/store/50p3qk8kka9dl6wyq40vydq945k0j3kv-nix-2.4pre20201102_550e11f/bin/nix
# To 50p3qk8kka9dl6wyq40vydq945k0j3kv/bin/nix
tarballPath() {
# Remove the store prefix
local path=''${1#${builtins.storeDir}/}
# Get the path relative to the derivation root
local rest=''${path#*/}
# Get the derivation hash
local drvHash=''${path%%-*}
echo "$drvHash/$rest"
}
substitute ${./scripts/install.in} $out/install \
${pkgs.lib.concatMapStrings
(system:
'' \
--replace '@tarballHash_${system}@' $(nix --experimental-features nix-command hash-file --base16 --type sha256 ${self.hydraJobs.binaryTarball.${system}}/*.tar.xz) \
--replace '@tarballPath_${system}@' $(tarballPath ${self.hydraJobs.binaryTarball.${system}}/*.tar.xz) \
''
)
systems
} --replace '@nixVersion@' ${version}
echo "file installer $out/install" >> $out/nix-support/hydra-build-products
'';
in {
# A Nixpkgs overlay that overrides the 'nix' and
@ -314,40 +348,8 @@
# to https://nixos.org/nix/install. It downloads the binary
# tarball for the user's system and calls the second half of the
# installation script.
installerScript =
with nixpkgsFor.x86_64-linux;
runCommand "installer-script"
{ buildInputs = [ nix ];
}
''
mkdir -p $out/nix-support
# Converts /nix/store/50p3qk8kka9dl6wyq40vydq945k0j3kv-nix-2.4pre20201102_550e11f/bin/nix
# To 50p3qk8kka9dl6wyq40vydq945k0j3kv/bin/nix
tarballPath() {
# Remove the store prefix
local path=''${1#${builtins.storeDir}/}
# Get the path relative to the derivation root
local rest=''${path#*/}
# Get the derivation hash
local drvHash=''${path%%-*}
echo "$drvHash/$rest"
}
substitute ${./scripts/install.in} $out/install \
${pkgs.lib.concatMapStrings
(system:
'' \
--replace '@tarballHash_${system}@' $(nix --experimental-features nix-command hash-file --base16 --type sha256 ${self.hydraJobs.binaryTarball.${system}}/*.tar.xz) \
--replace '@tarballPath_${system}@' $(tarballPath ${self.hydraJobs.binaryTarball.${system}}/*.tar.xz) \
''
)
[ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ]
} \
--replace '@nixVersion@' ${version}
echo "file installer $out/install" >> $out/nix-support/hydra-build-products
'';
installerScript = installScriptFor [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ];
installerScriptForGHA = installScriptFor [ "x86_64-linux" "x86_64-darwin" ];
# Line coverage analysis.
coverage =

View file

@ -60,7 +60,7 @@ case "$(uname -s).$(uname -m)" in
esac
# Use this command-line option to fetch the tarballs using nar-serve or Cachix
if "${1:---tarball-url-prefix}"; then
if [ "${1:-}" = "--tarball-url-prefix" ]; then
if [ -z "${2:-}" ]; then
oops "missing argument for --tarball-url-prefix"
fi
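For illustration, a hypothetical end-user invocation of the generated installer using this option; the cache URL mirrors the `nix-ci` Cachix cache configured in the CI workflow above and is an assumption, not part of this change:

```bash
# Hypothetical invocation: fetch the binary tarball from a Cachix/nar-serve
# endpoint instead of the default release server.
sh ./install --tarball-url-prefix "https://nix-ci.cachix.org/serve"
```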

View file

@ -0,0 +1,10 @@
#!/usr/bin/env bash
set -e
script=$(nix-build -A outputs.hydraJobs.installerScriptForGHA --no-out-link)
installerHash=$(echo $script | cut -b12-43 -)
installerURL=https://$CACHIX_NAME.cachix.org/serve/$installerHash/install
echo "::set-output name=installerURL::$installerURL"

View file

@ -254,7 +254,7 @@ connected:
std::cerr << "# accept\n" << storeUri << "\n";
auto inputs = readStrings<PathSet>(source);
auto outputs = readStrings<PathSet>(source);
auto wantedOutputs = readStrings<StringSet>(source);
AutoCloseFD uploadLock = openLockFile(currentLoad + "/" + escapeUri(storeUri) + ".upload-lock", true);
@ -282,28 +282,59 @@ connected:
BasicDerivation drv = store->readDerivation(*drvPath);
std::optional<BuildResult> optResult;
if (sshStore2->isTrusting || derivationIsCA(drv.type())) {
drv.inputSrcs = store->parseStorePathSet(inputs);
auto result = sshStore2->buildDerivation(*drvPath, drv);
optResult = sshStore2->buildDerivation(*drvPath, drv);
auto & result = *optResult;
if (!result.success())
throw Error("build of '%s' on '%s' failed: %s", store->printStorePath(*drvPath), storeUri, result.errorMsg);
} else {
copyPaths(store, sshStore2, {*drvPath}, NoRepair, NoCheckSigs, substitute, copyStorePathImpl);
copyPaths(store, sshStore2, StorePathSet {*drvPath}, NoRepair, NoCheckSigs, substitute, copyStorePathImpl);
sshStore2->buildPaths({{*drvPath}});
}
StorePathSet missing;
for (auto & path : outputs)
if (!store->isValidPath(store->parseStorePath(path))) missing.insert(store->parseStorePath(path));
auto outputHashes = staticOutputHashes(*store, drv);
std::set<Realisation> missingRealisations;
StorePathSet missingPaths;
if (settings.isExperimentalFeatureEnabled("ca-derivations") && !derivationHasKnownOutputPaths(drv.type())) {
for (auto & outputName : wantedOutputs) {
auto thisOutputHash = outputHashes.at(outputName);
auto thisOutputId = DrvOutput{ thisOutputHash, outputName };
if (!store->queryRealisation(thisOutputId)) {
debug("missing output %s", outputName);
assert(optResult);
auto & result = *optResult;
assert(result.builtOutputs.count(thisOutputId));
auto newRealisation = result.builtOutputs.at(thisOutputId);
missingRealisations.insert(newRealisation);
missingPaths.insert(newRealisation.outPath);
}
}
} else {
auto outputPaths = drv.outputsAndOptPaths(*store);
for (auto & [outputName, hopefullyOutputPath] : outputPaths) {
assert(hopefullyOutputPath.second);
if (!store->isValidPath(*hopefullyOutputPath.second))
missingPaths.insert(*hopefullyOutputPath.second);
}
}
if (!missing.empty()) {
if (!missingPaths.empty()) {
Activity act(*logger, lvlTalkative, actUnknown, fmt("copying outputs from '%s'", storeUri));
if (auto localStore = store.dynamic_pointer_cast<LocalStore>())
for (auto & i : missing)
localStore->locksHeld.insert(store->printStorePath(i)); /* FIXME: ugly */
for (auto & path : missingPaths)
localStore->locksHeld.insert(store->printStorePath(path)); /* FIXME: ugly */
/* No `copyStorePathImpl` because we always trust ourselves. */
copyPaths(ref<Store>(sshStore), store, missing, NoRepair, NoCheckSigs, NoSubstitute);
copyPaths(ref<Store>(sshStore), store, missingPaths, NoRepair, NoCheckSigs, NoSubstitute);
}
// XXX: Should be done as part of `copyPaths`
for (auto & realisation : missingRealisations) {
// Should hold, because if the feature isn't enabled the set
// of missing realisations should be empty
settings.requireExperimentalFeature("ca-derivations");
store->registerDrvOutput(realisation);
}
return 0;

File diff suppressed because it is too large.

View file

@ -2,7 +2,8 @@
#include "parsed-derivations.hh"
#include "lock.hh"
#include "local-store.hh"
#include "store-api.hh"
#include "pathlocks.hh"
#include "goal.hh"
namespace nix {
@ -64,7 +65,7 @@ struct DerivationGoal : public Goal
bool retrySubstitution;
/* The derivation stored at drvPath. */
std::unique_ptr<BasicDerivation> drv;
std::unique_ptr<Derivation> drv;
std::unique_ptr<ParsedDerivation> parsedDrv;
@ -79,18 +80,6 @@ struct DerivationGoal : public Goal
std::map<std::string, InitialOutput> initialOutputs;
/* User selected for running the builder. */
std::unique_ptr<UserLock> buildUser;
/* The process ID of the builder. */
Pid pid;
/* The temporary directory. */
Path tmpDir;
/* The path of the temporary directory in the sandbox. */
Path tmpDirInSandbox;
/* File descriptor for the log file. */
AutoCloseFD fdLogFile;
std::shared_ptr<BufferedSink> logFileSink, logSink;
@ -106,79 +95,15 @@ struct DerivationGoal : public Goal
std::string currentHookLine;
/* Pipe for the builder's standard output/error. */
Pipe builderOut;
/* Pipe for synchronising updates to the builder namespaces. */
Pipe userNamespaceSync;
/* The mount namespace of the builder, used to add additional
paths to the sandbox as a result of recursive Nix calls. */
AutoCloseFD sandboxMountNamespace;
/* On Linux, whether we're doing the build in its own user
namespace. */
bool usingUserNamespace = true;
/* The build hook. */
std::unique_ptr<HookInstance> hook;
/* Whether we're currently doing a chroot build. */
bool useChroot = false;
Path chrootRootDir;
/* RAII object to delete the chroot directory. */
std::shared_ptr<AutoDelete> autoDelChroot;
/* The sort of derivation we are building. */
DerivationType derivationType;
/* Whether to run the build in a private network namespace. */
bool privateNetwork = false;
typedef void (DerivationGoal::*GoalState)();
GoalState state;
/* Stuff we need to pass to initChild(). */
struct ChrootPath {
Path source;
bool optional;
ChrootPath(Path source = "", bool optional = false)
: source(source), optional(optional)
{ }
};
typedef map<Path, ChrootPath> DirsInChroot; // maps target path to source path
DirsInChroot dirsInChroot;
typedef map<string, string> Environment;
Environment env;
#if __APPLE__
typedef string SandboxProfile;
SandboxProfile additionalSandboxProfile;
#endif
/* Hash rewriting. */
StringMap inputRewrites, outputRewrites;
typedef map<StorePath, StorePath> RedirectedOutputs;
RedirectedOutputs redirectedOutputs;
/* The outputs paths used during the build.
- Input-addressed derivations or fixed content-addressed outputs are
sometimes built when some of their outputs already exist, and can not
be hidden via sandboxing. We use temporary locations instead and
rewrite after the build. Otherwise the regular predetermined paths are
put here.
- Floating content-addressed derivations do not know their final build
output paths until the outputs are hashed, so random locations are
used, and then renamed. The randomness helps guard against hidden
self-references.
*/
OutputPathMap scratchOutputs;
/* The final output paths of the build.
- For input-addressed derivations, always the precomputed paths
@ -191,11 +116,6 @@ struct DerivationGoal : public Goal
BuildMode buildMode;
/* If we're repairing without a chroot, there may be outputs that
are valid but corrupt. So we redirect these outputs to
temporary paths. */
StorePathSet redirectedBadOutputs;
BuildResult result;
/* The current round, if we're building multiple times. */
@ -203,17 +123,6 @@ struct DerivationGoal : public Goal
size_t nrRounds;
/* Path registration info from the previous round, if we're
building multiple times. Since this contains the hash, it
allows us to compare whether two rounds produced the same
result. */
std::map<Path, ValidPathInfo> prevInfos;
uid_t sandboxUid() { return usingUserNamespace ? 1000 : buildUser->getUID(); }
gid_t sandboxGid() { return usingUserNamespace ? 100 : buildUser->getGID(); }
const static Path homeDir;
std::unique_ptr<MaintainCount<uint64_t>> mcExpectedBuilds, mcRunningBuilds;
std::unique_ptr<Activity> act;
@ -226,39 +135,13 @@ struct DerivationGoal : public Goal
/* The remote machine on which we're building. */
std::string machineName;
/* The recursive Nix daemon socket. */
AutoCloseFD daemonSocket;
/* The daemon main thread. */
std::thread daemonThread;
/* The daemon worker threads. */
std::vector<std::thread> daemonWorkerThreads;
/* Paths that were added via recursive Nix calls. */
StorePathSet addedPaths;
/* Recursive Nix calls are only allowed to build or realize paths
in the original input closure or added via a recursive Nix call
(so e.g. you can't do 'nix-store -r /nix/store/<bla>' where
/nix/store/<bla> is some arbitrary path in a binary cache). */
bool isAllowed(const StorePath & path)
{
return inputPaths.count(path) || addedPaths.count(path);
}
friend struct RestrictedStore;
DerivationGoal(const StorePath & drvPath,
const StringSet & wantedOutputs, Worker & worker,
BuildMode buildMode = bmNormal);
DerivationGoal(const StorePath & drvPath, const BasicDerivation & drv,
const StringSet & wantedOutputs, Worker & worker,
BuildMode buildMode = bmNormal);
~DerivationGoal();
/* Whether we need to perform hash rewriting if there are valid output paths. */
bool needsHashRewrite();
virtual ~DerivationGoal();
void timedOut(Error && ex) override;
@ -280,7 +163,7 @@ struct DerivationGoal : public Goal
void closureRepaired();
void inputsRealised();
void tryToBuild();
void tryLocalBuild();
virtual void tryLocalBuild();
void buildDone();
void resolvedFinished();
@ -288,40 +171,11 @@ struct DerivationGoal : public Goal
/* Is the build hook willing to perform the build? */
HookReply tryBuildHook();
/* Start building a derivation. */
void startBuilder();
/* Fill in the environment for the builder. */
void initEnv();
/* Setup tmp dir location. */
void initTmpDir();
/* Write a JSON file containing the derivation attributes. */
void writeStructuredAttrs();
void startDaemon();
void stopDaemon();
/* Add 'path' to the set of paths that may be referenced by the
outputs, and make it appear in the sandbox. */
void addDependency(const StorePath & path);
/* Make a file owned by the builder. */
void chownToBuilder(const Path & path);
/* Run the builder's process. */
void runChild();
virtual int getChildStatus();
/* Check that the derivation outputs all exist and register them
as valid. */
void registerOutputs();
/* Check that an output meets the requirements specified by the
'outputChecks' attribute (or the legacy
'{allowed,disallowed}{References,Requisites}' attributes). */
void checkOutputs(const std::map<std::string, ValidPathInfo> & outputs);
virtual void registerOutputs();
/* Open a log file and a pipe to it. */
Path openLogFile();
@ -329,8 +183,18 @@ struct DerivationGoal : public Goal
/* Close the log file. */
void closeLogFile();
/* Delete the temporary directory, if we have one. */
void deleteTmpDir(bool force);
/* Close the read side of the logger pipe. */
virtual void closeReadPipes();
/* Cleanup hooks for buildDone() */
virtual void cleanupHookFinally();
virtual void cleanupPreChildKill();
virtual void cleanupPostChildKill();
virtual bool cleanupDecideWhetherDiskFull();
virtual void cleanupPostOutputsRegisteredModeCheck();
virtual void cleanupPostOutputsRegisteredModeNonCheck();
virtual bool isReadDesc(int fd);
/* Callback used by the worker to write to the log. */
void handleChildOutput(int fd, const string & data) override;
@ -347,17 +211,7 @@ struct DerivationGoal : public Goal
void checkPathValidity();
/* Forcibly kill the child process, if any. */
void killChild();
/* Create alternative path calculated from but distinct from the
input, so we can avoid overwriting outputs (or other store paths)
that already exist. */
StorePath makeFallbackPath(const StorePath & path);
/* Make a path to another based on the output name along with the
derivation hash. */
/* FIXME add option to randomize, so we can audit whether our
rewrites caught everything */
StorePath makeFallbackPath(std::string_view outputName);
virtual void killChild();
void repairClosure();
@ -370,4 +224,6 @@ struct DerivationGoal : public Goal
StorePathSet exportReferences(const StorePathSet & storePaths);
};
MakeError(NotDeterministic, BuildError);
}

View file

@ -2,6 +2,7 @@
#include "worker.hh"
#include "substitution-goal.hh"
#include "derivation-goal.hh"
#include "local-store.hh"
namespace nix {
@ -58,6 +59,26 @@ BuildResult Store::buildDerivation(const StorePath & drvPath, const BasicDerivat
result.status = BuildResult::MiscFailure;
result.errorMsg = e.msg();
}
// XXX: Should use `goal->queryPartialDerivationOutputMap()` once it's
// extended to return the full realisation for each output
auto staticDrvOutputs = drv.outputsAndOptPaths(*this);
auto outputHashes = staticOutputHashes(*this, drv);
for (auto & [outputName, staticOutput] : staticDrvOutputs) {
auto outputId = DrvOutput{outputHashes.at(outputName), outputName};
if (staticOutput.second)
result.builtOutputs.insert_or_assign(
outputId,
Realisation{ outputId, *staticOutput.second}
);
if (settings.isExperimentalFeatureEnabled("ca-derivations") && !derivationHasKnownOutputPaths(drv.type())) {
auto realisation = this->queryRealisation(outputId);
if (realisation)
result.builtOutputs.insert_or_assign(
outputId,
*realisation
);
}
}
return result;
}

File diff suppressed because it is too large.

View file

@ -0,0 +1,199 @@
#pragma once
#include "derivation-goal.hh"
#include "local-store.hh"
namespace nix {
struct LocalDerivationGoal : public DerivationGoal
{
LocalStore & getLocalStore();
/* User selected for running the builder. */
std::unique_ptr<UserLock> buildUser;
/* The process ID of the builder. */
Pid pid;
/* The temporary directory. */
Path tmpDir;
/* The path of the temporary directory in the sandbox. */
Path tmpDirInSandbox;
/* Pipe for the builder's standard output/error. */
Pipe builderOut;
/* Pipe for synchronising updates to the builder namespaces. */
Pipe userNamespaceSync;
/* The mount namespace of the builder, used to add additional
paths to the sandbox as a result of recursive Nix calls. */
AutoCloseFD sandboxMountNamespace;
/* On Linux, whether we're doing the build in its own user
namespace. */
bool usingUserNamespace = true;
/* Whether we're currently doing a chroot build. */
bool useChroot = false;
Path chrootRootDir;
/* RAII object to delete the chroot directory. */
std::shared_ptr<AutoDelete> autoDelChroot;
/* Whether to run the build in a private network namespace. */
bool privateNetwork = false;
/* Stuff we need to pass to initChild(). */
struct ChrootPath {
Path source;
bool optional;
ChrootPath(Path source = "", bool optional = false)
: source(source), optional(optional)
{ }
};
typedef map<Path, ChrootPath> DirsInChroot; // maps target path to source path
DirsInChroot dirsInChroot;
typedef map<string, string> Environment;
Environment env;
#if __APPLE__
typedef string SandboxProfile;
SandboxProfile additionalSandboxProfile;
#endif
/* Hash rewriting. */
StringMap inputRewrites, outputRewrites;
typedef map<StorePath, StorePath> RedirectedOutputs;
RedirectedOutputs redirectedOutputs;
/* The outputs paths used during the build.
- Input-addressed derivations or fixed content-addressed outputs are
sometimes built when some of their outputs already exist, and can not
be hidden via sandboxing. We use temporary locations instead and
rewrite after the build. Otherwise the regular predetermined paths are
put here.
- Floating content-addressed derivations do not know their final build
output paths until the outputs are hashed, so random locations are
used, and then renamed. The randomness helps guard against hidden
self-references.
*/
OutputPathMap scratchOutputs;
/* Path registration info from the previous round, if we're
building multiple times. Since this contains the hash, it
allows us to compare whether two rounds produced the same
result. */
std::map<Path, ValidPathInfo> prevInfos;
uid_t sandboxUid() { return usingUserNamespace ? 1000 : buildUser->getUID(); }
gid_t sandboxGid() { return usingUserNamespace ? 100 : buildUser->getGID(); }
const static Path homeDir;
/* The recursive Nix daemon socket. */
AutoCloseFD daemonSocket;
/* The daemon main thread. */
std::thread daemonThread;
/* The daemon worker threads. */
std::vector<std::thread> daemonWorkerThreads;
/* Paths that were added via recursive Nix calls. */
StorePathSet addedPaths;
/* Recursive Nix calls are only allowed to build or realize paths
in the original input closure or added via a recursive Nix call
(so e.g. you can't do 'nix-store -r /nix/store/<bla>' where
/nix/store/<bla> is some arbitrary path in a binary cache). */
bool isAllowed(const StorePath & path)
{
return inputPaths.count(path) || addedPaths.count(path);
}
friend struct RestrictedStore;
using DerivationGoal::DerivationGoal;
virtual ~LocalDerivationGoal() override;
/* Whether we need to perform hash rewriting if there are valid output paths. */
bool needsHashRewrite();
/* The additional states. */
void tryLocalBuild() override;
/* Start building a derivation. */
void startBuilder();
/* Fill in the environment for the builder. */
void initEnv();
/* Setup tmp dir location. */
void initTmpDir();
/* Write a JSON file containing the derivation attributes. */
void writeStructuredAttrs();
void startDaemon();
void stopDaemon();
/* Add 'path' to the set of paths that may be referenced by the
outputs, and make it appear in the sandbox. */
void addDependency(const StorePath & path);
/* Make a file owned by the builder. */
void chownToBuilder(const Path & path);
int getChildStatus() override;
/* Run the builder's process. */
void runChild();
/* Check that the derivation outputs all exist and register them
as valid. */
void registerOutputs() override;
/* Check that an output meets the requirements specified by the
'outputChecks' attribute (or the legacy
'{allowed,disallowed}{References,Requisites}' attributes). */
void checkOutputs(const std::map<std::string, ValidPathInfo> & outputs);
/* Close the read side of the logger pipe. */
void closeReadPipes() override;
/* Cleanup hooks for buildDone() */
void cleanupHookFinally() override;
void cleanupPreChildKill() override;
void cleanupPostChildKill() override;
bool cleanupDecideWhetherDiskFull() override;
void cleanupPostOutputsRegisteredModeCheck() override;
void cleanupPostOutputsRegisteredModeNonCheck() override;
bool isReadDesc(int fd) override;
/* Delete the temporary directory, if we have one. */
void deleteTmpDir(bool force);
/* Forcibly kill the child process, if any. */
void killChild() override;
/* Create alternative path calculated from but distinct from the
input, so we can avoid overwriting outputs (or other store paths)
that already exist. */
StorePath makeFallbackPath(const StorePath & path);
/* Make a path to another based on the output name along with the
derivation hash. */
/* FIXME add option to randomize, so we can audit whether our
rewrites caught everything */
StorePath makeFallbackPath(std::string_view outputName);
};
}

View file

@ -1,7 +1,7 @@
#include "machines.hh"
#include "worker.hh"
#include "substitution-goal.hh"
#include "derivation-goal.hh"
#include "local-derivation-goal.hh"
#include "hook-instance.hh"
#include <poll.h>
@ -59,8 +59,10 @@ std::shared_ptr<DerivationGoal> Worker::makeDerivationGoalCommon(
std::shared_ptr<DerivationGoal> Worker::makeDerivationGoal(const StorePath & drvPath,
const StringSet & wantedOutputs, BuildMode buildMode)
{
return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() {
return std::make_shared<DerivationGoal>(drvPath, wantedOutputs, *this, buildMode);
return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() -> std::shared_ptr<DerivationGoal> {
return !dynamic_cast<LocalStore *>(&store)
? std::make_shared</* */DerivationGoal>(drvPath, wantedOutputs, *this, buildMode)
: std::make_shared<LocalDerivationGoal>(drvPath, wantedOutputs, *this, buildMode);
});
}
@ -68,8 +70,10 @@ std::shared_ptr<DerivationGoal> Worker::makeDerivationGoal(const StorePath & drv
std::shared_ptr<DerivationGoal> Worker::makeBasicDerivationGoal(const StorePath & drvPath,
const BasicDerivation & drv, const StringSet & wantedOutputs, BuildMode buildMode)
{
return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() {
return std::make_shared<DerivationGoal>(drvPath, drv, wantedOutputs, *this, buildMode);
return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() -> std::shared_ptr<DerivationGoal> {
return !dynamic_cast<LocalStore *>(&store)
? std::make_shared</* */DerivationGoal>(drvPath, drv, wantedOutputs, *this, buildMode)
: std::make_shared<LocalDerivationGoal>(drvPath, drv, wantedOutputs, *this, buildMode);
});
}

View file

@ -575,6 +575,9 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
auto res = store->buildDerivation(drvPath, drv, buildMode);
logger->stopWork();
to << res.status << res.errorMsg;
if (GET_PROTOCOL_MINOR(clientVersion) >= 0x1c) {
worker_proto::write(*store, to, res.builtOutputs);
}
break;
}

View file

@ -57,6 +57,17 @@ bool derivationIsFixed(DerivationType dt) {
assert(false);
}
bool derivationHasKnownOutputPaths(DerivationType dt) {
switch (dt) {
case DerivationType::InputAddressed: return true;
case DerivationType::CAFixed: return true;
case DerivationType::CAFloating: return false;
case DerivationType::DeferredInputAddressed: return false;
};
assert(false);
}
bool derivationIsImpure(DerivationType dt) {
switch (dt) {
case DerivationType::InputAddressed: return false;

View file

@ -94,6 +94,11 @@ bool derivationIsFixed(DerivationType);
derivation is controlled separately. Never true for non-CA derivations. */
bool derivationIsImpure(DerivationType);
/* Does the derivation know its own output paths?
* Only true when there's no floating-ca derivation involved in the closure.
*/
bool derivationHasKnownOutputPaths(DerivationType);
struct BasicDerivation
{
DerivationOutputs outputs; /* keyed on symbolic IDs */

View file

@ -165,10 +165,15 @@ bool Settings::isExperimentalFeatureEnabled(const std::string & name)
return std::find(f.begin(), f.end(), name) != f.end();
}
MissingExperimentalFeature::MissingExperimentalFeature(std::string feature)
: Error("experimental Nix feature '%1%' is disabled; use '--experimental-features %1%' to override", feature)
, missingFeature(feature)
{}
void Settings::requireExperimentalFeature(const std::string & name)
{
if (!isExperimentalFeatureEnabled(name))
throw Error("experimental Nix feature '%1%' is disabled; use '--experimental-features %1%' to override", name);
throw MissingExperimentalFeature(name);
}
bool Settings::isWSL1()

View file

@ -45,6 +45,15 @@ struct PluginFilesSetting : public BaseSetting<Paths>
void set(const std::string & str, bool append = false) override;
};
class MissingExperimentalFeature: public Error
{
public:
std::string missingFeature;
MissingExperimentalFeature(std::string feature);
virtual const char* sname() const override { return "MissingExperimentalFeature"; }
};
class Settings : public Config {
unsigned int getDefaultCores();
@ -632,7 +641,7 @@ public:
is `root`.
> **Warning**
>
>
> Adding a user to `trusted-users` is essentially equivalent to
> giving that user root access to the system. For example, the user
> can set `sandbox-paths` and thereby obtain read access to
@ -722,13 +731,13 @@ public:
The program executes with no arguments. The program's environment
contains the following environment variables:
- `DRV_PATH`
- `DRV_PATH`
The derivation for the built paths.
Example:
`/nix/store/5nihn1a7pa8b25l9zafqaqibznlvvp3f-bash-4.4-p23.drv`
- `OUT_PATHS`
- `OUT_PATHS`
Output paths of the built derivation, separated by a space
character.
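As an illustration of the interface described above, here is a minimal post-build-hook sketch that consumes the `OUT_PATHS` variable; the binary cache URI is a placeholder assumption, not part of this change:

```bash
#!/bin/sh
# Minimal post-build-hook sketch. Nix runs it with no arguments and
# provides DRV_PATH and OUT_PATHS in the environment.
set -eu
set -f          # disable globbing; store paths never need it
export IFS=' '  # OUT_PATHS is space-separated
echo "Uploading paths" $OUT_PATHS
exec nix copy --to "s3://example-nix-cache" $OUT_PATHS  # cache URI is hypothetical
```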
@ -759,7 +768,7 @@ public:
documentation](https://ec.haxx.se/usingcurl-netrc.html).
> **Note**
>
>
> This must be an absolute path, and `~` is not resolved. For
> example, `~/.netrc` won't resolve to your home directory's
> `.netrc`.

View file

@ -258,7 +258,9 @@ public:
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 3)
conn->from >> status.timesBuilt >> status.isNonDeterministic >> status.startTime >> status.stopTime;
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 6) {
status.builtOutputs = worker_proto::read(*this, conn->from, Phantom<DrvOutputs> {});
}
return status;
}

View file

@ -655,6 +655,7 @@ void LocalStore::checkDerivationOutputs(const StorePath & drvPath, const Derivat
void LocalStore::registerDrvOutput(const Realisation & info)
{
settings.requireExperimentalFeature("ca-derivations");
auto state(_state.lock());
retrySQLite<void>([&]() {
state->stmts->RegisterRealisedOutput.use()

View file

@ -280,7 +280,7 @@ private:
void createUser(const std::string & userName, uid_t userId) override;
friend struct DerivationGoal;
friend struct LocalDerivationGoal;
friend struct SubstitutionGoal;
};

View file

@ -33,6 +33,8 @@ struct Realisation {
GENERATE_CMP(Realisation, me->id, me->outPath);
};
typedef std::map<DrvOutput, Realisation> DrvOutputs;
struct OpaquePath {
StorePath path;

View file

@ -12,6 +12,7 @@
#include "logging.hh"
#include "callback.hh"
#include "filetransfer.hh"
#include <nlohmann/json.hpp>
namespace nix {
@ -49,6 +50,21 @@ void write(const Store & store, Sink & out, const ContentAddress & ca)
out << renderContentAddress(ca);
}
Realisation read(const Store & store, Source & from, Phantom<Realisation> _)
{
std::string rawInput = readString(from);
return Realisation::fromJSON(
nlohmann::json::parse(rawInput),
"remote-protocol"
);
}
void write(const Store & store, Sink & out, const Realisation & realisation)
{ out << realisation.toJSON().dump(); }
DrvOutput read(const Store & store, Source & from, Phantom<DrvOutput> _)
{ return DrvOutput::parse(readString(from)); }
void write(const Store & store, Sink & out, const DrvOutput & drvOutput)
{ out << drvOutput.to_string(); }
std::optional<StorePath> read(const Store & store, Source & from, Phantom<std::optional<StorePath>> _)
{
@ -664,6 +680,10 @@ BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicD
unsigned int status;
conn->from >> status >> res.errorMsg;
res.status = (BuildResult::Status) status;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 0x1c) {
auto builtOutputs = worker_proto::read(*this, conn->from, Phantom<DrvOutputs> {});
res.builtOutputs = builtOutputs;
}
return res;
}

View file

@ -5,7 +5,7 @@ namespace nix {
#define SERVE_MAGIC_1 0x390c9deb
#define SERVE_MAGIC_2 0x5452eecb
#define SERVE_PROTOCOL_VERSION 0x205
#define SERVE_PROTOCOL_VERSION 0x206
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)

View file

@ -814,6 +814,37 @@ void copyOrBuildStorePath(ref<Store> srcStore, ref<Store> dstStore,
}
std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStore, const RealisedPath::Set & paths,
RepairFlag repair, CheckSigsFlag checkSigs, SubstituteFlag substitute,
std::function<void(ref<Store>, ref<Store>, const ValidPathInfo &, RepairFlag, CheckSigsFlag)> copyStorePathImpl)
{
StorePathSet storePaths;
std::set<Realisation> realisations;
for (auto & path : paths) {
storePaths.insert(path.path());
if (auto realisation = std::get_if<Realisation>(&path.raw)) {
settings.requireExperimentalFeature("ca-derivations");
realisations.insert(*realisation);
}
}
auto pathsMap = copyPaths(srcStore, dstStore, storePaths, repair, checkSigs, substitute, copyStorePathImpl);
try {
for (auto & realisation : realisations) {
dstStore->registerDrvOutput(realisation);
}
} catch (MissingExperimentalFeature & e) {
// Don't fail if the remote doesn't support CA derivations, as it might
// not be within our control to change that, and we might still want
// to at least copy the output paths.
if (e.missingFeature == "ca-derivations")
ignoreException();
else
throw;
}
return pathsMap;
}
std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStore, const StorePathSet & storePaths,
RepairFlag repair, CheckSigsFlag checkSigs, SubstituteFlag substitute,
std::function<void(ref<Store>, ref<Store>, const ValidPathInfo &, RepairFlag, CheckSigsFlag)> copyStorePathImpl)
@ -828,7 +859,6 @@ std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStor
for (auto & path : storePaths)
pathsMap.insert_or_assign(path, path);
if (missing.empty()) return pathsMap;
Activity act(*logger, lvlInfo, actCopyPaths, fmt("copying %d paths", missing.size()));
@ -903,21 +933,9 @@ std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStor
nrDone++;
showProgress();
});
return pathsMap;
}
void copyClosure(ref<Store> srcStore, ref<Store> dstStore,
const StorePathSet & storePaths, RepairFlag repair, CheckSigsFlag checkSigs,
SubstituteFlag substitute)
{
StorePathSet closure;
srcStore->computeFSClosure(storePaths, closure);
copyPaths(srcStore, dstStore, closure, repair, checkSigs, substitute);
}
std::optional<ValidPathInfo> decodeValidPathInfo(const Store & store, std::istream & str, std::optional<HashResult> hashGiven)
{
std::string path;

View file

@ -162,6 +162,8 @@ struct BuildResult
non-determinism.) */
bool isNonDeterministic = false;
DrvOutputs builtOutputs;
/* The start/stop times of the build (or one of the rounds, if it
was repeated). */
time_t startTime = 0, stopTime = 0;
@ -769,19 +771,17 @@ void copyOrBuildStorePath(ref<Store> srcStore, ref<Store> dstStore,
isn't just the default that way `nix copy` etc. still can be relied upon to
not build anything. */
std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStore,
const StorePathSet & storePaths,
const RealisedPath::Set &,
RepairFlag repair = NoRepair,
CheckSigsFlag checkSigs = CheckSigs,
SubstituteFlag substitute = NoSubstitute,
std::function<void(ref<Store>, ref<Store>, const ValidPathInfo &, RepairFlag, CheckSigsFlag)> copyStorePathImpl = copyStorePathAdapter);
/* Copy the closure of the specified paths from one store to another. */
void copyClosure(ref<Store> srcStore, ref<Store> dstStore,
const StorePathSet & storePaths,
std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStore,
const StorePathSet& paths,
RepairFlag repair = NoRepair,
CheckSigsFlag checkSigs = CheckSigs,
SubstituteFlag substitute = NoSubstitute);
SubstituteFlag substitute = NoSubstitute,
std::function<void(ref<Store>, ref<Store>, const ValidPathInfo &, RepairFlag, CheckSigsFlag)> copyStorePathImpl = copyStorePathAdapter);
/* Remove the temporary roots file for this process. Any temporary

View file

@ -9,7 +9,7 @@ namespace nix {
#define WORKER_MAGIC_1 0x6e697863
#define WORKER_MAGIC_2 0x6478696f
#define PROTOCOL_VERSION 0x11b
#define PROTOCOL_VERSION 0x11c
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
@ -86,6 +86,8 @@ namespace worker_proto {
MAKE_WORKER_PROTO(, std::string);
MAKE_WORKER_PROTO(, StorePath);
MAKE_WORKER_PROTO(, ContentAddress);
MAKE_WORKER_PROTO(, Realisation);
MAKE_WORKER_PROTO(, DrvOutput);
MAKE_WORKER_PROTO(template<typename T>, std::set<T>);

View file

@ -19,6 +19,14 @@ void Args::addFlag(Flag && flag_)
if (flag->shortName) shortFlags[flag->shortName] = flag;
}
void Args::removeFlag(const std::string & longName)
{
auto flag = longFlags.find(longName);
assert(flag != longFlags.end());
if (flag->second->shortName) shortFlags.erase(flag->second->shortName);
longFlags.erase(flag);
}
void Completions::add(std::string completion, std::string description)
{
assert(description.find('\n') == std::string::npos);

View file

@ -140,6 +140,8 @@ public:
void addFlag(Flag && flag);
void removeFlag(const std::string & longName);
void expectArgs(ExpectedArg && arg)
{
expectedArgs.emplace_back(std::move(arg));

View file

@ -946,7 +946,7 @@ void killUser(uid_t uid)
#else
if (kill(-1, SIGKILL) == 0) break;
#endif
if (errno == ESRCH) break; /* no more processes */
if (errno == ESRCH || errno == EPERM) break; /* no more processes */
if (errno != EINTR)
throw SysError("cannot kill processes for uid '%1%'", uid);
}

View file

@ -50,12 +50,12 @@ static int main_nix_copy_closure(int argc, char ** argv)
auto to = toMode ? openStore(remoteUri) : openStore();
auto from = toMode ? openStore() : openStore(remoteUri);
StorePathSet storePaths2;
RealisedPath::Set storePaths2;
for (auto & path : storePaths)
storePaths2.insert(from->followLinksToStorePath(path));
StorePathSet closure;
from->computeFSClosure(storePaths2, closure, false, includeOutputs);
RealisedPath::Set closure;
RealisedPath::closure(*from, storePaths2, closure);
copyPaths(from, to, closure, NoRepair, NoCheckSigs, useSubstitutes);

View file

@ -905,6 +905,10 @@ static void opServe(Strings opFlags, Strings opArgs)
if (GET_PROTOCOL_MINOR(clientVersion) >= 3)
out << status.timesBuilt << status.isNonDeterministic << status.startTime << status.stopTime;
if (GET_PROTOCOL_MINOR(clientVersion) >= 6) {
worker_proto::write(*store, out, status.builtOutputs);
}
break;
}

View file

@ -8,7 +8,7 @@
using namespace nix;
struct CmdCopy : StorePathsCommand
struct CmdCopy : RealisedPathsCommand
{
std::string srcUri, dstUri;
@ -16,10 +16,10 @@ struct CmdCopy : StorePathsCommand
SubstituteFlag substitute = NoSubstitute;
using StorePathsCommand::run;
using RealisedPathsCommand::run;
CmdCopy()
: StorePathsCommand(true)
: RealisedPathsCommand(true)
{
addFlag({
.longName = "from",
@ -75,14 +75,15 @@ struct CmdCopy : StorePathsCommand
if (srcUri.empty() && dstUri.empty())
throw UsageError("you must pass '--from' and/or '--to'");
StorePathsCommand::run(store);
RealisedPathsCommand::run(store);
}
void run(ref<Store> srcStore, StorePaths storePaths) override
void run(ref<Store> srcStore, std::vector<RealisedPath> paths) override
{
ref<Store> dstStore = dstUri.empty() ? openStore() : openStore(dstUri);
copyPaths(srcStore, dstStore, StorePathSet(storePaths.begin(), storePaths.end()),
copyPaths(
srcStore, dstStore, RealisedPath::Set(paths.begin(), paths.end()),
NoRepair, checkSigs, substitute);
}
};

src/nix/flake-lock.md (new file, +38 lines)
View file

@ -0,0 +1,38 @@
R""(
# Examples
* Update the `nixpkgs` and `nix` inputs of the flake in the current
directory:
```console
# nix flake lock --update-input nixpkgs --update-input nix
* Updated 'nix': 'github:NixOS/nix/9fab14adbc3810d5cc1f88672fde1eee4358405c' -> 'github:NixOS/nix/8927cba62f5afb33b01016d5c4f7f8b7d0adde3c'
* Updated 'nixpkgs': 'github:NixOS/nixpkgs/3d2d8f281a27d466fa54b469b5993f7dde198375' -> 'github:NixOS/nixpkgs/a3a3dda3bacf61e8a39258a0ed9c924eeca8e293'
```
# Description
This command updates the lock file of a flake (`flake.lock`) so that
it contains a lock for every flake input specified in
`flake.nix`. Existing lock file entries are not updated unless
required by a flag such as `--update-input`.
Note that every command that operates on a flake will also update the
lock file if needed, and supports the same flags. Therefore,
```console
# nix flake lock --update-input nixpkgs
# nix build
```
is equivalent to:
```console
# nix build --update-input nixpkgs
```
Thus, this command is only useful if you want to update the lock file
separately from any other action such as building.
)""

View file

@ -2,52 +2,33 @@ R""(
# Examples
* Update the `nixpkgs` and `nix` inputs of the flake in the current
directory:
```console
# nix flake update --update-input nixpkgs --update-input nix
* Updated 'nix': 'github:NixOS/nix/9fab14adbc3810d5cc1f88672fde1eee4358405c' -> 'github:NixOS/nix/8927cba62f5afb33b01016d5c4f7f8b7d0adde3c'
* Updated 'nixpkgs': 'github:NixOS/nixpkgs/3d2d8f281a27d466fa54b469b5993f7dde198375' -> 'github:NixOS/nixpkgs/a3a3dda3bacf61e8a39258a0ed9c924eeca8e293'
```
* Recreate the lock file (i.e. update all inputs) and commit the new
lock file:
```console
# nix flake update --recreate-lock-file --commit-lock-file
# nix flake update
* Updated 'nix': 'github:NixOS/nix/9fab14adbc3810d5cc1f88672fde1eee4358405c' -> 'github:NixOS/nix/8927cba62f5afb33b01016d5c4f7f8b7d0adde3c'
* Updated 'nixpkgs': 'github:NixOS/nixpkgs/3d2d8f281a27d466fa54b469b5993f7dde198375' -> 'github:NixOS/nixpkgs/a3a3dda3bacf61e8a39258a0ed9c924eeca8e293'
warning: committed new revision '158bcbd9d6cc08ab859c0810186c1beebc982aad'
```
# Description
This command updates the lock file of a flake (`flake.lock`) so that
it contains a lock for every flake input specified in
`flake.nix`. Note that every command that operates on a flake will
also update the lock file if needed, and supports the same
flags. Therefore,
This command recreates the lock file of a flake (`flake.lock`), thus
updating the lock for every mutable input (like `nixpkgs`) to its
current version. This is equivalent to passing `--recreate-lock-file`
to any command that operates on a flake. That is,
```console
# nix flake update --update-input nixpkgs
# nix flake update
# nix build
```
is equivalent to:
```console
# nix build --update-input nixpkgs
# nix build --recreate-lock-file
```
Thus, this command is only useful if you want to update the lock file
separately from any other action such as building.
> **Note**
>
> This command does *not* update locks that are already present unless
> you explicitly ask for it using `--update-input` or
> `--recreate-lock-file`. Thus, if the lock file already has locks for
> every input, then `nix flake update` (without arguments) does
> nothing.
)""

View file

@ -104,6 +104,14 @@ struct CmdFlakeUpdate : FlakeCommand
return "update flake lock file";
}
CmdFlakeUpdate()
{
/* Remove flags that don't make sense. */
removeFlag("recreate-lock-file");
removeFlag("update-input");
removeFlag("no-update-lock-file");
}
std::string doc() override
{
return
@ -113,7 +121,30 @@ struct CmdFlakeUpdate : FlakeCommand
void run(nix::ref<nix::Store> store) override
{
/* Use --refresh by default for 'nix flake update'. */
settings.tarballTtl = 0;
lockFlags.recreateLockFile = true;
lockFlake();
}
};
struct CmdFlakeLock : FlakeCommand
{
std::string description() override
{
return "create missing lock file entries";
}
std::string doc() override
{
return
#include "flake-lock.md"
;
}
void run(nix::ref<nix::Store> store) override
{
settings.tarballTtl = 0;
lockFlake();
@ -1006,6 +1037,7 @@ struct CmdFlake : NixMultiCommand
CmdFlake()
: MultiCommand({
{"update", []() { return make_ref<CmdFlakeUpdate>(); }},
{"lock", []() { return make_ref<CmdFlakeLock>(); }},
{"info", []() { return make_ref<CmdFlakeInfo>(); }},
{"list-inputs", []() { return make_ref<CmdFlakeListInputs>(); }},
{"check", []() { return make_ref<CmdFlakeCheck>(); }},

View file

@ -0,0 +1,53 @@
{ busybox }:
with import ./config.nix;
let
mkDerivation = args:
derivation ({
inherit system;
builder = busybox;
args = ["sh" "-e" args.builder or (builtins.toFile "builder-${args.name}.sh" "if [ -e .attrs.sh ]; then source .attrs.sh; fi; eval \"$buildCommand\"")];
outputHashMode = "recursive";
outputHashAlgo = "sha256";
__contentAddressed = true;
} // removeAttrs args ["builder" "meta"])
// { meta = args.meta or {}; };
input1 = mkDerivation {
shell = busybox;
name = "build-remote-input-1";
buildCommand = "echo FOO > $out";
requiredSystemFeatures = ["foo"];
};
input2 = mkDerivation {
shell = busybox;
name = "build-remote-input-2";
buildCommand = "echo BAR > $out";
requiredSystemFeatures = ["bar"];
};
input3 = mkDerivation {
shell = busybox;
name = "build-remote-input-3";
buildCommand = ''
read x < ${input2}
echo $x BAZ > $out
'';
requiredSystemFeatures = ["baz"];
};
in
mkDerivation {
shell = busybox;
name = "build-remote";
buildCommand =
''
read x < ${input1}
read y < ${input3}
echo "$x $y" > $out
'';
}

View file

@ -1,5 +1,5 @@
source common.sh
file=build-hook-ca.nix
file=build-hook-ca-fixed.nix
source build-remote.sh

View file

@ -0,0 +1,7 @@
source common.sh
file=build-hook-ca-floating.nix
sed -i 's/experimental-features .*/& ca-derivations/' "$NIX_CONF_DIR"/nix.conf
source build-remote.sh

View file

@ -2,7 +2,7 @@ source common.sh
# We act as if remote trusts us, but it doesn't. This is fine because we
# are only building (fixed) CA derivations.
file=build-hook-ca.nix
file=build-hook-ca-fixed.nix
prog=$(readlink -e ./nix-daemon-untrusting.sh)
proto=ssh-ng
trusting=true

View file

@ -232,7 +232,7 @@ nix build -o $TEST_ROOT/result --flake-registry file:///no-registry.json $flake2
nix build -o $TEST_ROOT/result --no-registries $flake2Dir#bar --refresh
# Updating the flake should not change the lockfile.
nix flake update $flake2Dir
nix flake lock $flake2Dir
[[ -z $(git -C $flake2Dir diff master) ]]
# Now we should be able to build the flake in pure mode.
@ -354,10 +354,10 @@ nix build -o $TEST_ROOT/result flake3#xyzzy flake3#fnord
nix build -o $TEST_ROOT/result flake4#xyzzy
# Test 'nix flake update' and --override-flake.
nix flake update $flake3Dir
nix flake lock $flake3Dir
[[ -z $(git -C $flake3Dir diff master) ]]
nix flake update $flake3Dir --recreate-lock-file --override-flake flake2 nixpkgs
nix flake update $flake3Dir --override-flake flake2 nixpkgs
[[ ! -z $(git -C $flake3Dir diff master) ]]
# Make branch "removeXyzzy" where flake3 doesn't have xyzzy anymore
@ -389,7 +389,7 @@ cat > $flake3Dir/flake.nix <<EOF
};
}
EOF
nix flake update $flake3Dir
nix flake lock $flake3Dir
git -C $flake3Dir add flake.nix flake.lock
git -C $flake3Dir commit -m 'Remove packages.xyzzy'
git -C $flake3Dir checkout master
@ -547,7 +547,7 @@ cat > $flake3Dir/flake.nix <<EOF
}
EOF
nix flake update $flake3Dir
nix flake lock $flake3Dir
[[ $(jq -c .nodes.root.inputs.bar $flake3Dir/flake.lock) = '["foo"]' ]]
cat > $flake3Dir/flake.nix <<EOF
@ -559,7 +559,7 @@ cat > $flake3Dir/flake.nix <<EOF
}
EOF
nix flake update $flake3Dir
nix flake lock $flake3Dir
[[ $(jq -c .nodes.root.inputs.bar $flake3Dir/flake.lock) = '["flake2","flake1"]' ]]
cat > $flake3Dir/flake.nix <<EOF
@ -571,7 +571,7 @@ cat > $flake3Dir/flake.nix <<EOF
}
EOF
nix flake update $flake3Dir
nix flake lock $flake3Dir
[[ $(jq -c .nodes.root.inputs.bar $flake3Dir/flake.lock) = '["flake2"]' ]]
# Test overriding inputs of inputs.
@ -587,7 +587,7 @@ cat > $flake3Dir/flake.nix <<EOF
}
EOF
nix flake update $flake3Dir
nix flake lock $flake3Dir
[[ $(jq .nodes.flake1.locked.url $flake3Dir/flake.lock) =~ flake7 ]]
cat > $flake3Dir/flake.nix <<EOF
@ -600,7 +600,7 @@ cat > $flake3Dir/flake.nix <<EOF
}
EOF
nix flake update $flake3Dir --recreate-lock-file
nix flake update $flake3Dir
[[ $(jq -c .nodes.flake2.inputs.flake1 $flake3Dir/flake.lock) =~ '["foo"]' ]]
[[ $(jq .nodes.foo.locked.url $flake3Dir/flake.lock) =~ flake7 ]]
@ -658,20 +658,20 @@ nix build -o $TEST_ROOT/result "file://$TEST_ROOT/flake.tar.gz?narHash=sha256-qQ
# Test --override-input.
git -C $flake3Dir reset --hard
nix flake update $flake3Dir --override-input flake2/flake1 flake5 -vvvvv
nix flake lock $flake3Dir --override-input flake2/flake1 flake5 -vvvvv
[[ $(jq .nodes.flake1_2.locked.url $flake3Dir/flake.lock) =~ flake5 ]]
nix flake update $flake3Dir --override-input flake2/flake1 flake1
nix flake lock $flake3Dir --override-input flake2/flake1 flake1
[[ $(jq -r .nodes.flake1_2.locked.rev $flake3Dir/flake.lock) =~ $hash2 ]]
nix flake update $flake3Dir --override-input flake2/flake1 flake1/master/$hash1
nix flake lock $flake3Dir --override-input flake2/flake1 flake1/master/$hash1
[[ $(jq -r .nodes.flake1_2.locked.rev $flake3Dir/flake.lock) =~ $hash1 ]]
# Test --update-input.
nix flake update $flake3Dir
nix flake lock $flake3Dir
[[ $(jq -r .nodes.flake1_2.locked.rev $flake3Dir/flake.lock) = $hash1 ]]
nix flake update $flake3Dir --update-input flake2/flake1
nix flake lock $flake3Dir --update-input flake2/flake1
[[ $(jq -r .nodes.flake1_2.locked.rev $flake3Dir/flake.lock) =~ $hash2 ]]
# Test 'nix flake list-inputs'.

View file

@ -17,6 +17,8 @@ nix_tests = \
linux-sandbox.sh \
build-dry.sh \
build-remote-input-addressed.sh \
build-remote-content-addressed-fixed.sh \
build-remote-content-addressed-floating.sh \
build-remote-trustless-should-pass-1.sh \
build-remote-trustless-should-pass-2.sh \
build-remote-trustless-should-fail-0.sh \
@ -41,6 +43,7 @@ nix_tests = \
describe-stores.sh \
flakes.sh \
content-addressed.sh \
nix-copy-content-addressed.sh \
build.sh \
compute-levels.sh
# parallel.sh

View file

@ -0,0 +1,34 @@
#!/usr/bin/env bash
source common.sh
# Globally enable the ca derivations experimental flag
sed -i 's/experimental-features = .*/& ca-derivations ca-references/' "$NIX_CONF_DIR/nix.conf"
export REMOTE_STORE_DIR="$TEST_ROOT/remote_store"
export REMOTE_STORE="file://$REMOTE_STORE_DIR"
ensureCorrectlyCopied () {
attrPath="$1"
nix build --store "$REMOTE_STORE" --file ./content-addressed.nix "$attrPath"
}
testOneCopy () {
clearStore
rm -rf "$REMOTE_STORE_DIR"
attrPath="$1"
nix copy --to $REMOTE_STORE "$attrPath" --file ./content-addressed.nix
ensureCorrectlyCopied "$attrPath"
# Ensure that we can copy back what we put in the store
clearStore
nix copy --from $REMOTE_STORE \
--file ./content-addressed.nix "$attrPath" \
--no-check-sigs
}
for attrPath in rootCA dependentCA transitivelyDependentCA dependentNonCA dependentFixedOutput; do
testOneCopy "$attrPath"
done