Merge commit 'e12308dd63f0ad27b22dcdb3da89c411eebcad2b' into ca-drv-exotic

Commit cdc9f34a44 by John Ericson, 2021-04-05 19:06:30 -04:00
95 changed files with 1130 additions and 423 deletions


@ -8,52 +8,62 @@ jobs:
matrix: matrix:
os: [ubuntu-latest, macos-latest] os: [ubuntu-latest, macos-latest]
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
env:
CACHIX_NAME: nix-ci
steps: steps:
- uses: actions/checkout@v2.3.4 - uses: actions/checkout@v2.3.4
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: cachix/install-nix-action@v12 - uses: cachix/install-nix-action@v13
- uses: cachix/cachix-action@v8 - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- uses: cachix/cachix-action@v9
with: with:
name: '${{ env.CACHIX_NAME }}' name: '${{ env.CACHIX_NAME }}'
signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}' signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
#- run: nix flake check #- run: nix flake check
- run: nix-build -A checks.$(if [[ `uname` = Linux ]]; then echo x86_64-linux; else echo x86_64-darwin; fi) - run: nix-build -A checks.$(if [[ `uname` = Linux ]]; then echo x86_64-linux; else echo x86_64-darwin; fi)
installer: check_cachix:
if: github.event_name == 'push' name: Cachix secret present for installer tests
needs: tests runs-on: ubuntu-latest
outputs:
secret: ${{ steps.secret.outputs.secret }}
steps:
- name: Check for Cachix secret
id: secret
env:
_CACHIX_SECRETS: ${{ secrets.CACHIX_SIGNING_KEY }}${{ secrets.CACHIX_AUTH_TOKEN }}
run: echo "::set-output name=secret::${{ env._CACHIX_SECRETS != '' }}"
installer:
needs: [tests, check_cachix]
if: github.event_name == 'push' && needs.check_cachix.outputs.secret == 'true'
runs-on: ubuntu-latest runs-on: ubuntu-latest
env:
CACHIX_NAME: nix-ci
outputs: outputs:
installerURL: ${{ steps.prepare-installer.outputs.installerURL }} installerURL: ${{ steps.prepare-installer.outputs.installerURL }}
steps: steps:
- uses: actions/checkout@v2.3.4 - uses: actions/checkout@v2.3.4
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: cachix/install-nix-action@v12 - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- uses: cachix/cachix-action@v8 - uses: cachix/install-nix-action@v13
- uses: cachix/cachix-action@v9
with: with:
name: '${{ env.CACHIX_NAME }}' name: '${{ env.CACHIX_NAME }}'
signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}' signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
- id: prepare-installer - id: prepare-installer
run: scripts/prepare-installer-for-github-actions run: scripts/prepare-installer-for-github-actions
installer_test: installer_test:
if: github.event_name == 'push' needs: [installer, check_cachix]
needs: installer if: github.event_name == 'push' && needs.check_cachix.outputs.secret == 'true'
strategy: strategy:
matrix: matrix:
os: [ubuntu-latest, macos-latest] os: [ubuntu-latest, macos-latest]
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
env:
CACHIX_NAME: nix-ci
steps: steps:
- uses: actions/checkout@v2.3.4 - uses: actions/checkout@v2.3.4
- uses: cachix/install-nix-action@master - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- uses: cachix/install-nix-action@v13
with: with:
install_url: '${{needs.installer.outputs.installerURL}}' install_url: '${{needs.installer.outputs.installerURL}}'
install_options: '--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve' install_options: "--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve"
- run: nix-instantiate -E 'builtins.currentTime' --eval - run: nix-instantiate -E 'builtins.currentTime' --eval
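Both jobs now derive the Cachix cache name from the repository slug instead of hard-coding `nix-ci`. A quick sketch of what that `tr` pipeline yields, using a hypothetical fork name:

    # hypothetical: reproduce the CACHIX_NAME derivation used in the workflow
    GITHUB_REPOSITORY="alice/nix"
    echo "$GITHUB_REPOSITORY-install-tests" | tr "[A-Z]/" "[a-z]-"
    # prints: alice-nix-install-tests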


@ -17,6 +17,7 @@ LIBBROTLI_LIBS = @LIBBROTLI_LIBS@
LIBCURL_LIBS = @LIBCURL_LIBS@ LIBCURL_LIBS = @LIBCURL_LIBS@
LIBLZMA_LIBS = @LIBLZMA_LIBS@ LIBLZMA_LIBS = @LIBLZMA_LIBS@
OPENSSL_LIBS = @OPENSSL_LIBS@ OPENSSL_LIBS = @OPENSSL_LIBS@
LIBSECCOMP_LIBS = @LIBSECCOMP_LIBS@
PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_VERSION = @PACKAGE_VERSION@ PACKAGE_VERSION = @PACKAGE_VERSION@
SHELL = @bash@ SHELL = @bash@


@ -112,6 +112,10 @@ default, set it to `-`.
features appear in the derivations `requiredSystemFeatures` features appear in the derivations `requiredSystemFeatures`
attribute.. attribute..
8. The (base64-encoded) public host key of the remote machine. If omitted, SSH
will use its regular known-hosts file. Specifically, the field is calculated
via `base64 -w0 /etc/ssh/ssh_host_ed25519_key.pub`.
For example, the machine specification For example, the machine specification
nix@scratchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 1 kvm nix@scratchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 1 kvm
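One way to fill in that new eighth field when adding a builder is to encode the remote host key inline; a minimal sketch, assuming the standard ed25519 key location on the remote and using `-` for the unused mandatory-features field:

    # hypothetical: append a machines entry whose last field is the base64-encoded host key
    key="$(ssh nix@scratchy.labs.cs.uu.nl base64 -w0 /etc/ssh/ssh_host_ed25519_key.pub)"
    echo "nix@scratchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 1 kvm - $key" >> /etc/nix/machines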


@ -79,7 +79,7 @@ paths. Realisation is a somewhat overloaded term:
system). If the path is already valid, we are done immediately. system). If the path is already valid, we are done immediately.
Otherwise, the path and any missing paths in its closure may be Otherwise, the path and any missing paths in its closure may be
produced through substitutes. If there are no (successful) produced through substitutes. If there are no (successful)
subsitutes, realisation fails. substitutes, realisation fails.
The output path of each derivation is printed on standard output. (For The output path of each derivation is printed on standard output. (For
non-derivations argument, the argument itself is printed.) non-derivations argument, the argument itself is printed.)
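In practice, realising a store derivation either reuses an already-valid path, substitutes it, or builds it, and prints the resulting output path; a hypothetical invocation:

    # hypothetical: realise a derivation, substituting it when a cache provides it
    nix-store --realise /nix/store/<hash>-hello-2.10.drv
    # prints e.g. /nix/store/<hash>-hello-2.10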


@ -134,15 +134,6 @@ Most Nix commands accept the following command-line options:
failure in obtaining the substitutes to lead to a full build from failure in obtaining the substitutes to lead to a full build from
source (with the related consumption of resources). source (with the related consumption of resources).
- `--no-build-hook`
Disables the build hook mechanism. This allows to ignore remote
builders if they are setup on the machine.
It's useful in cases where the bandwidth between the client and the
remote builder is too low. In that case it can take more time to
upload the sources to the remote builder and fetch back the result
than to do the computation locally.
- `--readonly-mode` - `--readonly-mode`
When this option is used, no attempt is made to open the Nix When this option is used, no attempt is made to open the Nix
database. Most Nix operations do need database access, so those database. Most Nix operations do need database access, so those


@ -69,3 +69,6 @@
`--disable-seccomp-sandboxing` option to the `configure` script (Not `--disable-seccomp-sandboxing` option to the `configure` script (Not
recommended unless your system doesn't support `libseccomp`). To get recommended unless your system doesn't support `libseccomp`). To get
the library, visit <https://github.com/seccomp/libseccomp>. the library, visit <https://github.com/seccomp/libseccomp>.
- Niels Lohmann's [JSON library](https://github.com/nlohmann/json).


@ -90,7 +90,7 @@
lowdown lowdown
gmock gmock
] ]
++ lib.optionals stdenv.isLinux [libseccomp utillinuxMinimal] ++ lib.optionals stdenv.isLinux [libseccomp (pkgs.util-linuxMinimal or pkgs.utillinuxMinimal)]
++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
++ lib.optional stdenv.isx86_64 libcpuid; ++ lib.optional stdenv.isx86_64 libcpuid;
@ -144,12 +144,46 @@
echo "file installer $out/install" >> $out/nix-support/hydra-build-products echo "file installer $out/install" >> $out/nix-support/hydra-build-products
''; '';
testNixVersions = pkgs: client: daemon: with commonDeps pkgs; pkgs.stdenv.mkDerivation {
NIX_DAEMON_PACKAGE = daemon;
NIX_CLIENT_PACKAGE = client;
# Must keep this name short as OSX has a rather strict limit on the
# socket path length, and this name appears in the path of the
# nix-daemon socket used in the tests
name = "nix-tests";
inherit version;
src = self;
VERSION_SUFFIX = versionSuffix;
nativeBuildInputs = nativeBuildDeps;
buildInputs = buildDeps ++ awsDeps;
propagatedBuildInputs = propagatedDeps;
enableParallelBuilding = true;
dontBuild = true;
doInstallCheck = true;
installPhase = ''
mkdir -p $out
'';
installCheckPhase = "make installcheck";
};
in { in {
# A Nixpkgs overlay that overrides the 'nix' and # A Nixpkgs overlay that overrides the 'nix' and
# 'nix.perl-bindings' packages. # 'nix.perl-bindings' packages.
overlay = final: prev: { overlay = final: prev: {
# An older version of Nix to test against when using the daemon.
# Currently using `nixUnstable` as the stable one doesn't respect
# `NIX_DAEMON_SOCKET_PATH` which is needed for the tests.
nixStable = prev.nix;
nix = with final; with commonDeps pkgs; stdenv.mkDerivation { nix = with final; with commonDeps pkgs; stdenv.mkDerivation {
name = "nix-${version}"; name = "nix-${version}";
inherit version; inherit version;
@ -434,6 +468,15 @@
checks = forAllSystems (system: { checks = forAllSystems (system: {
binaryTarball = self.hydraJobs.binaryTarball.${system}; binaryTarball = self.hydraJobs.binaryTarball.${system};
perlBindings = self.hydraJobs.perlBindings.${system}; perlBindings = self.hydraJobs.perlBindings.${system};
installTests =
let pkgs = nixpkgsFor.${system}; in
pkgs.runCommand "install-tests" {
againstSelf = testNixVersions pkgs pkgs.nix pkgs.pkgs.nix;
againstCurrentUnstable = testNixVersions pkgs pkgs.nix pkgs.nixUnstable;
# Disabled because the latest stable version doesn't handle
# `NIX_DAEMON_SOCKET_PATH` which is required for the tests to work
# againstLatestStable = testNixVersions pkgs pkgs.nix pkgs.nixStable;
} "touch $out";
}); });
packages = forAllSystems (system: { packages = forAllSystems (system: {
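The new `installTests` check wires the `testNixVersions` helper above into `checks`, so the client/daemon compatibility matrix can be exercised locally the same way CI does; a hypothetical run:

    # hypothetical: build the version-compatibility checks for one platform
    nix-build -A checks.x86_64-linux.installTests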


@ -1,36 +0,0 @@
define build-jar
$(1)_NAME ?= $(1)
_d := $$(strip $$($(1)_DIR))
$(1)_PATH := $$(_d)/$$($(1)_NAME).jar
$(1)_TMPDIR := $$(_d)/.$$($(1)_NAME).jar.tmp
_jars := $$(foreach jar, $$($(1)_JARS), $$($$(jar)_PATH))
$$($(1)_PATH): $$($(1)_SOURCES) $$(_jars) $$($(1)_EXTRA_DEPS)| $$($(1)_ORDER_AFTER)
@rm -rf $$($(1)_TMPDIR)
@mkdir -p $$($(1)_TMPDIR)
$$(trace-javac) javac $(GLOBAL_JAVACFLAGS) $$($(1)_JAVACFLAGS) -d $$($(1)_TMPDIR) \
$$(foreach fn, $$($(1)_SOURCES), '$$(fn)') \
-cp "$$(subst $$(space),,$$(foreach jar,$$($(1)_JARS),$$($$(jar)_PATH):))$$$$CLASSPATH"
@echo -e '$$(subst $$(newline),\n,$$($(1)_MANIFEST))' > $$($(1)_PATH).manifest
$$(trace-jar) jar cfm $$($(1)_PATH) $$($(1)_PATH).manifest -C $$($(1)_TMPDIR) .
@rm $$($(1)_PATH).manifest
@rm -rf $$($(1)_TMPDIR)
$(1)_INSTALL_DIR ?= $$(jardir)
$(1)_INSTALL_PATH := $$($(1)_INSTALL_DIR)/$$($(1)_NAME).jar
$$(eval $$(call install-file-as, $$($(1)_PATH), $$($(1)_INSTALL_PATH), 0644))
install: $$($(1)_INSTALL_PATH)
jars-list += $$($(1)_PATH)
clean-files += $$($(1)_PATH)
endef


@ -31,7 +31,6 @@ libdir ?= $(prefix)/lib
bindir ?= $(prefix)/bin bindir ?= $(prefix)/bin
libexecdir ?= $(prefix)/libexec libexecdir ?= $(prefix)/libexec
datadir ?= $(prefix)/share datadir ?= $(prefix)/share
jardir ?= $(datadir)/java
localstatedir ?= $(prefix)/var localstatedir ?= $(prefix)/var
sysconfdir ?= $(prefix)/etc sysconfdir ?= $(prefix)/etc
mandir ?= $(prefix)/share/man mandir ?= $(prefix)/share/man
@ -74,7 +73,6 @@ BUILD_DEBUG ?= 1
ifeq ($(BUILD_DEBUG), 1) ifeq ($(BUILD_DEBUG), 1)
GLOBAL_CFLAGS += -g GLOBAL_CFLAGS += -g
GLOBAL_CXXFLAGS += -g GLOBAL_CXXFLAGS += -g
GLOBAL_JAVACFLAGS += -g
endif endif
@ -84,7 +82,6 @@ include mk/clean.mk
include mk/install.mk include mk/install.mk
include mk/libraries.mk include mk/libraries.mk
include mk/programs.mk include mk/programs.mk
include mk/jars.mk
include mk/patterns.mk include mk/patterns.mk
include mk/templates.mk include mk/templates.mk
include mk/tests.mk include mk/tests.mk
@ -102,7 +99,6 @@ $(foreach mf, $(makefiles), $(eval $(call include-sub-makefile, $(mf))))
# Instantiate stuff. # Instantiate stuff.
$(foreach lib, $(libraries), $(eval $(call build-library,$(lib)))) $(foreach lib, $(libraries), $(eval $(call build-library,$(lib))))
$(foreach prog, $(programs), $(eval $(call build-program,$(prog)))) $(foreach prog, $(programs), $(eval $(call build-program,$(prog))))
$(foreach jar, $(jars), $(eval $(call build-jar,$(jar))))
$(foreach script, $(bin-scripts), $(eval $(call install-program-in,$(script),$(bindir)))) $(foreach script, $(bin-scripts), $(eval $(call install-program-in,$(script),$(bindir))))
$(foreach script, $(bin-scripts), $(eval programs-list += $(script))) $(foreach script, $(bin-scripts), $(eval programs-list += $(script)))
$(foreach script, $(noinst-scripts), $(eval programs-list += $(script))) $(foreach script, $(noinst-scripts), $(eval programs-list += $(script)))
@ -113,7 +109,7 @@ $(foreach file, $(man-pages), $(eval $(call install-data-in, $(file), $(mandir)/
.PHONY: default all man help .PHONY: default all man help
all: $(programs-list) $(libs-list) $(jars-list) $(man-pages) all: $(programs-list) $(libs-list) $(man-pages)
man: $(man-pages) man: $(man-pages)
@ -137,12 +133,6 @@ ifdef libs-list
@echo "The following libraries can be built:" @echo "The following libraries can be built:"
@echo "" @echo ""
@for i in $(libs-list); do echo " $$i"; done @for i in $(libs-list); do echo " $$i"; done
endif
ifdef jars-list
@echo ""
@echo "The following JARs can be built:"
@echo ""
@for i in $(jars-list); do echo " $$i"; done
endif endif
@echo "" @echo ""
@echo "The following variables control the build:" @echo "The following variables control the build:"
@ -153,4 +143,5 @@ endif
@echo " CFLAGS: Flags for the C compiler" @echo " CFLAGS: Flags for the C compiler"
@echo " CXX ($(CXX)): C++ compiler to be used" @echo " CXX ($(CXX)): C++ compiler to be used"
@echo " CXXFLAGS: Flags for the C++ compiler" @echo " CXXFLAGS: Flags for the C++ compiler"
@echo " CPPFLAGS: C preprocessor flags, used for both CC and CXX"
@$(print-var-help) @$(print-var-help)


@ -1,11 +1,11 @@
$(buildprefix)%.o: %.cc $(buildprefix)%.o: %.cc
@mkdir -p "$(dir $@)" @mkdir -p "$(dir $@)"
$(trace-cxx) $(CXX) -o $@ -c $< $(GLOBAL_CXXFLAGS_PCH) $(GLOBAL_CXXFLAGS) $(CXXFLAGS) $($@_CXXFLAGS) -MMD -MF $(call filename-to-dep, $@) -MP $(trace-cxx) $(CXX) -o $@ -c $< $(CPPFLAGS) $(GLOBAL_CXXFLAGS_PCH) $(GLOBAL_CXXFLAGS) $(CXXFLAGS) $($@_CXXFLAGS) -MMD -MF $(call filename-to-dep, $@) -MP
$(buildprefix)%.o: %.cpp $(buildprefix)%.o: %.cpp
@mkdir -p "$(dir $@)" @mkdir -p "$(dir $@)"
$(trace-cxx) $(CXX) -o $@ -c $< $(GLOBAL_CXXFLAGS_PCH) $(GLOBAL_CXXFLAGS) $(CXXFLAGS) $($@_CXXFLAGS) -MMD -MF $(call filename-to-dep, $@) -MP $(trace-cxx) $(CXX) -o $@ -c $< $(CPPFLAGS) $(GLOBAL_CXXFLAGS_PCH) $(GLOBAL_CXXFLAGS) $(CXXFLAGS) $($@_CXXFLAGS) -MMD -MF $(call filename-to-dep, $@) -MP
$(buildprefix)%.o: %.c $(buildprefix)%.o: %.c
@mkdir -p "$(dir $@)" @mkdir -p "$(dir $@)"
$(trace-cc) $(CC) -o $@ -c $< $(GLOBAL_CFLAGS) $(CFLAGS) $($@_CFLAGS) -MMD -MF $(call filename-to-dep, $@) -MP $(trace-cc) $(CC) -o $@ -c $< $(CPPFLAGS) $(GLOBAL_CFLAGS) $(CFLAGS) $($@_CFLAGS) -MMD -MF $(call filename-to-dep, $@) -MP
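With `$(CPPFLAGS)` now threaded through every pattern rule, preprocessor flags reach both the C and C++ compilations; a hypothetical invocation:

    # hypothetical: point both compilers at an extra include directory
    make CPPFLAGS="-I/opt/local/include"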


@ -14,7 +14,7 @@ if [ -t 1 ]; then
yellow="" yellow=""
normal="" normal=""
fi fi
(cd $(dirname $1) && env ${TESTS_ENVIRONMENT} init.sh 2>/dev/null > /dev/null) (cd tests && env ${TESTS_ENVIRONMENT} init.sh 2>/dev/null > /dev/null)
log="$(cd $(dirname $1) && env ${TESTS_ENVIRONMENT} $(basename $1) 2>&1)" log="$(cd $(dirname $1) && env ${TESTS_ENVIRONMENT} $(basename $1) 2>&1)"
status=$? status=$?
if [ $status -eq 0 ]; then if [ $status -eq 0 ]; then


@ -8,7 +8,7 @@ define run-install-test
.PHONY: $1.test .PHONY: $1.test
$1.test: $1 $(test-deps) $1.test: $1 $(test-deps)
@env TEST_NAME=$(notdir $(basename $1)) TESTS_ENVIRONMENT="$(tests-environment)" mk/run_test.sh $1 < /dev/null @env TEST_NAME=$(basename $1) TESTS_ENVIRONMENT="$(tests-environment)" mk/run_test.sh $1 < /dev/null
endef endef


@ -8,8 +8,6 @@ ifeq ($(V), 0)
trace-ld = @echo " LD " $@; trace-ld = @echo " LD " $@;
trace-ar = @echo " AR " $@; trace-ar = @echo " AR " $@;
trace-install = @echo " INST " $@; trace-install = @echo " INST " $@;
trace-javac = @echo " JAVAC " $@;
trace-jar = @echo " JAR " $@;
trace-mkdir = @echo " MKDIR " $@; trace-mkdir = @echo " MKDIR " $@;
trace-test = @echo " TEST " $@; trace-test = @echo " TEST " $@;


@ -0,0 +1,46 @@
#!/usr/bin/env bash
((NEW_NIX_FIRST_BUILD_UID=301))
id_available(){
dscl . list /Users UniqueID | grep -E '\b'$1'\b' >/dev/null
}
change_nixbld_names_and_ids(){
local name uid next_id
((next_id=NEW_NIX_FIRST_BUILD_UID))
echo "Attempting to migrate nixbld users."
echo "Each user should change from nixbld# to _nixbld#"
echo "and their IDs relocated to $next_id+"
while read -r name uid; do
echo " Checking $name (uid: $uid)"
# iterate for a clean ID
while id_available "$next_id"; do
((next_id++))
if ((next_id >= 400)); then
echo "We've hit UID 400 without placing all of your users :("
echo "You should use the commands in this script as a starting"
echo "point to review your UID-space and manually move the"
echo "remaining users (or delete them, if you don't need them)."
exit 1
fi
done
if [[ $name == _* ]]; then
echo " It looks like $name has already been renamed--skipping."
else
# first 3 are cleanup, it's OK if they aren't here
sudo dscl . delete /Users/$name dsAttrTypeNative:_writers_passwd &>/dev/null || true
sudo dscl . change /Users/$name NFSHomeDirectory "/private/var/empty 1" "/var/empty" &>/dev/null || true
# remove existing user from group
sudo dseditgroup -o edit -t user -d $name nixbld || true
sudo dscl . change /Users/$name UniqueID $uid $next_id
sudo dscl . change /Users/$name RecordName $name _$name
# add renamed user to group
sudo dseditgroup -o edit -t user -a _$name nixbld
echo " $name migrated to _$name (uid: $next_id)"
fi
done < <(dscl . list /Users UniqueID | grep nixbld | sort -n -k2)
}
change_nixbld_names_and_ids
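Before running the migration it can be useful to preview the accounts the loop will touch; the same `dscl` query the script feeds into its `while read` loop works as a dry run:

    # preview the nixbld users and their current UIDs (the query the script itself uses)
    dscl . list /Users UniqueID | grep nixbld | sort -n -k2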


@ -4,6 +4,8 @@ set -eu
set -o pipefail set -o pipefail
readonly PLIST_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist readonly PLIST_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist
NIX_FIRST_BUILD_UID="301"
NIX_BUILD_USER_NAME_TEMPLATE="_nixbld%d"
dsclattr() { dsclattr() {
/usr/bin/dscl . -read "$1" \ /usr/bin/dscl . -read "$1" \


@ -25,13 +25,15 @@ readonly RED='\033[31m'
readonly NIX_USER_COUNT=${NIX_USER_COUNT:-32} readonly NIX_USER_COUNT=${NIX_USER_COUNT:-32}
readonly NIX_BUILD_GROUP_ID="30000" readonly NIX_BUILD_GROUP_ID="30000"
readonly NIX_BUILD_GROUP_NAME="nixbld" readonly NIX_BUILD_GROUP_NAME="nixbld"
readonly NIX_FIRST_BUILD_UID="30001" # darwin installer needs to override these
NIX_FIRST_BUILD_UID="30001"
NIX_BUILD_USER_NAME_TEMPLATE="nixbld%d"
# Please don't change this. We don't support it, because the # Please don't change this. We don't support it, because the
# default shell profile that comes with Nix doesn't support it. # default shell profile that comes with Nix doesn't support it.
readonly NIX_ROOT="/nix" readonly NIX_ROOT="/nix"
readonly NIX_EXTRA_CONF=${NIX_EXTRA_CONF:-} readonly NIX_EXTRA_CONF=${NIX_EXTRA_CONF:-}
readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshenv") readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshenv" "/etc/bash.bashrc" "/etc/zsh/zshenv")
readonly PROFILE_BACKUP_SUFFIX=".backup-before-nix" readonly PROFILE_BACKUP_SUFFIX=".backup-before-nix"
readonly PROFILE_NIX_FILE="$NIX_ROOT/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" readonly PROFILE_NIX_FILE="$NIX_ROOT/var/nix/profiles/default/etc/profile.d/nix-daemon.sh"
@ -104,7 +106,7 @@ EOF
} }
nix_user_for_core() { nix_user_for_core() {
printf "nixbld%d" "$1" printf "$NIX_BUILD_USER_NAME_TEMPLATE" "$1"
} }
nix_uid_for_core() { nix_uid_for_core() {
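Since the user-name template is now a variable, `nix_user_for_core` expands whichever pattern the installer set; a hypothetical check with the darwin override:

    # hypothetical: what nix_user_for_core prints for core 3 under the darwin template
    NIX_BUILD_USER_NAME_TEMPLATE="_nixbld%d"
    printf "$NIX_BUILD_USER_NAME_TEMPLATE\n" 3    # prints _nixbld3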


@ -48,6 +48,8 @@ struct EvalCommand : virtual StoreCommand, MixEvalArgs
ref<EvalState> getEvalState(); ref<EvalState> getEvalState();
std::shared_ptr<EvalState> evalState; std::shared_ptr<EvalState> evalState;
~EvalCommand();
}; };
struct MixFlakeOptions : virtual Args, EvalCommand struct MixFlakeOptions : virtual Args, EvalCommand


@ -111,10 +111,11 @@ MixFlakeOptions::MixFlakeOptions()
addFlag({ addFlag({
.longName = "override-input", .longName = "override-input",
.description = "Override a specific flake input (e.g. `dwarffs/nixpkgs`).", .description = "Override a specific flake input (e.g. `dwarffs/nixpkgs`). This implies `--no-write-lock-file`.",
.category = category, .category = category,
.labels = {"input-path", "flake-url"}, .labels = {"input-path", "flake-url"},
.handler = {[&](std::string inputPath, std::string flakeRef) { .handler = {[&](std::string inputPath, std::string flakeRef) {
lockFlags.writeLockFile = false;
lockFlags.inputOverrides.insert_or_assign( lockFlags.inputOverrides.insert_or_assign(
flake::parseInputPath(inputPath), flake::parseInputPath(inputPath),
parseFlakeRef(flakeRef, absPath("."))); parseFlakeRef(flakeRef, absPath(".")));
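Because `--override-input` now implies `--no-write-lock-file`, an ad-hoc override no longer rewrites `flake.lock`; a hypothetical use:

    # hypothetical: try a flake against a different nixpkgs without touching flake.lock
    nix build --override-input nixpkgs github:NixOS/nixpkgs/nixos-unstable
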
@ -280,6 +281,12 @@ ref<EvalState> EvalCommand::getEvalState()
return ref<EvalState>(evalState); return ref<EvalState>(evalState);
} }
EvalCommand::~EvalCommand()
{
if (evalState)
evalState->printStats();
}
void completeFlakeRef(ref<Store> store, std::string_view prefix) void completeFlakeRef(ref<Store> store, std::string_view prefix)
{ {
if (prefix == "") if (prefix == "")
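The new `EvalCommand` destructor means every evaluating command can report evaluator statistics on exit; as far as I recall the report is still gated behind an environment variable, so a hypothetical way to see it:

    # hypothetical: print evaluator statistics when the command finishes
    NIX_SHOW_STATS=1 nix search nixpkgs hello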


@ -592,10 +592,8 @@ Value & EvalState::getBuiltin(const string & name)
std::optional<EvalState::Doc> EvalState::getDoc(Value & v) std::optional<EvalState::Doc> EvalState::getDoc(Value & v)
{ {
if (v.isPrimOp() || v.isPrimOpApp()) { if (v.isPrimOp()) {
auto v2 = &v; auto v2 = &v;
while (v2->isPrimOpApp())
v2 = v2->primOpApp.left;
if (v2->primOp->doc) if (v2->primOp->doc)
return Doc { return Doc {
.pos = noPos, .pos = noPos,


@ -113,7 +113,7 @@ struct LockFlags
/* Whether to commit changes to flake.lock. */ /* Whether to commit changes to flake.lock. */
bool commitLockFile = false; bool commitLockFile = false;
/* Flake inputs to be overriden. */ /* Flake inputs to be overridden. */
std::map<InputPath, FlakeRef> inputOverrides; std::map<InputPath, FlakeRef> inputOverrides;
/* Flake inputs to be updated. This means that any existing lock /* Flake inputs to be updated. This means that any existing lock


@ -145,7 +145,13 @@ DownloadFileResult downloadFile(
bool immutable, bool immutable,
const Headers & headers = {}); const Headers & headers = {});
std::pair<Tree, time_t> downloadTarball( struct DownloadTarballMeta
{
time_t lastModified;
std::string effectiveUrl;
};
std::pair<Tree, DownloadTarballMeta> downloadTarball(
ref<Store> store, ref<Store> store,
const std::string & url, const std::string & url,
const std::string & name, const std::string & name,


@ -153,12 +153,14 @@ struct GitInputScheme : InputScheme
std::pair<bool, std::string> getActualUrl(const Input & input) const std::pair<bool, std::string> getActualUrl(const Input & input) const
{ {
// Don't clone file:// URIs (but otherwise treat them the // file:// URIs are normally not cloned (but otherwise treated the
// same as remote URIs, i.e. don't use the working tree or // same as remote URIs, i.e. we don't use the working tree or
// HEAD). // HEAD). Exception: If _NIX_FORCE_HTTP is set, or the repo is a bare git
// repo, treat as a remote URI to force a clone.
static bool forceHttp = getEnv("_NIX_FORCE_HTTP") == "1"; // for testing static bool forceHttp = getEnv("_NIX_FORCE_HTTP") == "1"; // for testing
auto url = parseURL(getStrAttr(input.attrs, "url")); auto url = parseURL(getStrAttr(input.attrs, "url"));
bool isLocal = url.scheme == "file" && !forceHttp; bool isBareRepository = url.scheme == "file" && !pathExists(url.path + "/.git");
bool isLocal = url.scheme == "file" && !forceHttp && !isBareRepository;
return {isLocal, isLocal ? url.path : url.base}; return {isLocal, isLocal ? url.path : url.base};
} }
@ -363,7 +365,9 @@ struct GitInputScheme : InputScheme
? "refs/*" ? "refs/*"
: ref->compare(0, 5, "refs/") == 0 : ref->compare(0, 5, "refs/") == 0
? *ref ? *ref
: "refs/heads/" + *ref; : ref == "HEAD"
? *ref
: "refs/heads/" + *ref;
runProgram("git", true, { "-C", repoDir, "fetch", "--quiet", "--force", "--", actualUrl, fmt("%s:%s", fetchRef, fetchRef) }); runProgram("git", true, { "-C", repoDir, "fetch", "--quiet", "--force", "--", actualUrl, fmt("%s:%s", fetchRef, fetchRef) });
} catch (Error & e) { } catch (Error & e) {
if (!pathExists(localRefFile)) throw; if (!pathExists(localRefFile)) throw;
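With bare repositories now treated like remote URLs, fetching a local mirror clones it instead of trying to read a working tree it does not have; a hypothetical fetch (the mirror path is made up):

    # hypothetical: a bare local mirror is cloned rather than read as a checkout
    nix-instantiate --eval -E 'builtins.fetchGit { url = "file:///srv/mirrors/nixpkgs.git"; ref = "HEAD"; }'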


@ -207,16 +207,16 @@ struct GitArchiveInputScheme : InputScheme
auto url = getDownloadUrl(input); auto url = getDownloadUrl(input);
auto [tree, lastModified] = downloadTarball(store, url.url, "source", true, url.headers); auto [tree, meta] = downloadTarball(store, url.url, "source", true, url.headers);
input.attrs.insert_or_assign("lastModified", uint64_t(lastModified)); input.attrs.insert_or_assign("lastModified", uint64_t(meta.lastModified));
getCache()->add( getCache()->add(
store, store,
immutableAttrs, immutableAttrs,
{ {
{"rev", rev->gitRev()}, {"rev", rev->gitRev()},
{"lastModified", uint64_t(lastModified)} {"lastModified", uint64_t(meta.lastModified)}
}, },
tree.storePath, tree.storePath,
true); true);


@ -114,7 +114,7 @@ static std::shared_ptr<Registry> getSystemRegistry()
Path getUserRegistryPath() Path getUserRegistryPath()
{ {
return getHome() + "/.config/nix/registry.json"; return getConfigDir() + "/nix/registry.json";
} }
std::shared_ptr<Registry> getUserRegistry() std::shared_ptr<Registry> getUserRegistry()
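Using `getConfigDir()` means the user registry follows `XDG_CONFIG_HOME` rather than always landing in `~/.config`; a hypothetical check:

    # hypothetical: the user flake registry now follows XDG_CONFIG_HOME
    XDG_CONFIG_HOME=$HOME/.xdg nix registry add mynixpkgs github:NixOS/nixpkgs
    cat $HOME/.xdg/nix/registry.json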


@ -115,7 +115,7 @@ DownloadFileResult downloadFile(
}; };
} }
std::pair<Tree, time_t> downloadTarball( std::pair<Tree, DownloadTarballMeta> downloadTarball(
ref<Store> store, ref<Store> store,
const std::string & url, const std::string & url,
const std::string & name, const std::string & name,
@ -133,7 +133,10 @@ std::pair<Tree, time_t> downloadTarball(
if (cached && !cached->expired) if (cached && !cached->expired)
return { return {
Tree(store->toRealPath(cached->storePath), std::move(cached->storePath)), Tree(store->toRealPath(cached->storePath), std::move(cached->storePath)),
getIntAttr(cached->infoAttrs, "lastModified") {
.lastModified = time_t(getIntAttr(cached->infoAttrs, "lastModified")),
.effectiveUrl = maybeGetStrAttr(cached->infoAttrs, "effectiveUrl").value_or(url),
},
}; };
auto res = downloadFile(store, url, name, immutable, headers); auto res = downloadFile(store, url, name, immutable, headers);
@ -158,6 +161,7 @@ std::pair<Tree, time_t> downloadTarball(
Attrs infoAttrs({ Attrs infoAttrs({
{"lastModified", uint64_t(lastModified)}, {"lastModified", uint64_t(lastModified)},
{"effectiveUrl", res.effectiveUrl},
{"etag", res.etag}, {"etag", res.etag},
}); });
@ -170,7 +174,10 @@ std::pair<Tree, time_t> downloadTarball(
return { return {
Tree(store->toRealPath(*unpackedStorePath), std::move(*unpackedStorePath)), Tree(store->toRealPath(*unpackedStorePath), std::move(*unpackedStorePath)),
lastModified, {
.lastModified = lastModified,
.effectiveUrl = res.effectiveUrl,
},
}; };
} }
@ -229,9 +236,11 @@ struct TarballInputScheme : InputScheme
return true; return true;
} }
std::pair<Tree, Input> fetch(ref<Store> store, const Input & input) override std::pair<Tree, Input> fetch(ref<Store> store, const Input & _input) override
{ {
auto tree = downloadTarball(store, getStrAttr(input.attrs, "url"), "source", false).first; Input input(_input);
auto [tree, meta] = downloadTarball(store, getStrAttr(input.attrs, "url"), "source", false);
input.attrs.insert_or_assign("url", meta.effectiveUrl);
return {std::move(tree), input}; return {std::move(tree), input};
} }
}; };


@ -122,6 +122,7 @@ public:
void log(Verbosity lvl, const FormatOrString & fs) override void log(Verbosity lvl, const FormatOrString & fs) override
{ {
if (lvl > verbosity) return;
auto state(state_.lock()); auto state(state_.lock());
log(*state, lvl, fs.s); log(*state, lvl, fs.s);
} }


@ -170,7 +170,7 @@ void DerivationGoal::getDerivation()
return; return;
} }
addWaitee(upcast_goal(worker.makeSubstitutionGoal(drvPath))); addWaitee(upcast_goal(worker.makePathSubstitutionGoal(drvPath)));
state = &DerivationGoal::loadDerivation; state = &DerivationGoal::loadDerivation;
} }
@ -246,17 +246,22 @@ void DerivationGoal::haveDerivation()
through substitutes. If that doesn't work, we'll build through substitutes. If that doesn't work, we'll build
them. */ them. */
if (settings.useSubstitutes && parsedDrv->substitutesAllowed()) if (settings.useSubstitutes && parsedDrv->substitutesAllowed())
for (auto & [_, status] : initialOutputs) { for (auto & [outputName, status] : initialOutputs) {
if (!status.wanted) continue; if (!status.wanted) continue;
if (!status.known) { if (!status.known)
warn("do not know how to query for unknown floating content-addressed derivation output yet"); addWaitee(
/* Nothing to wait for; tail call */ upcast_goal(
return DerivationGoal::gaveUpOnSubstitution(); worker.makeDrvOutputSubstitutionGoal(
} DrvOutput{status.outputHash, outputName},
addWaitee(upcast_goal(worker.makeSubstitutionGoal( buildMode == bmRepair ? Repair : NoRepair
status.known->path, )
buildMode == bmRepair ? Repair : NoRepair, )
getDerivationCA(*drv)))); );
else
addWaitee(upcast_goal(worker.makePathSubstitutionGoal(
status.known->path,
buildMode == bmRepair ? Repair : NoRepair,
getDerivationCA(*drv))));
} }
if (waitees.empty()) /* to prevent hang (no wake-up event) */ if (waitees.empty()) /* to prevent hang (no wake-up event) */
@ -337,7 +342,7 @@ void DerivationGoal::gaveUpOnSubstitution()
if (!settings.useSubstitutes) if (!settings.useSubstitutes)
throw Error("dependency '%s' of '%s' does not exist, and substitution is disabled", throw Error("dependency '%s' of '%s' does not exist, and substitution is disabled",
worker.store.printStorePath(i), worker.store.printStorePath(drvPath)); worker.store.printStorePath(i), worker.store.printStorePath(drvPath));
addWaitee(upcast_goal(worker.makeSubstitutionGoal(i))); addWaitee(upcast_goal(worker.makePathSubstitutionGoal(i)));
} }
if (waitees.empty()) /* to prevent hang (no wake-up event) */ if (waitees.empty()) /* to prevent hang (no wake-up event) */
@ -388,7 +393,7 @@ void DerivationGoal::repairClosure()
worker.store.printStorePath(i), worker.store.printStorePath(drvPath)); worker.store.printStorePath(i), worker.store.printStorePath(drvPath));
auto drvPath2 = outputsToDrv.find(i); auto drvPath2 = outputsToDrv.find(i);
if (drvPath2 == outputsToDrv.end()) if (drvPath2 == outputsToDrv.end())
addWaitee(upcast_goal(worker.makeSubstitutionGoal(i, Repair))); addWaitee(upcast_goal(worker.makePathSubstitutionGoal(i, Repair)));
else else
addWaitee(worker.makeDerivationGoal(drvPath2->second, StringSet(), bmRepair)); addWaitee(worker.makeDerivationGoal(drvPath2->second, StringSet(), bmRepair));
} }
@ -920,6 +925,8 @@ void DerivationGoal::resolvedFinished() {
if (realisation) { if (realisation) {
auto newRealisation = *realisation; auto newRealisation = *realisation;
newRealisation.id = DrvOutput{initialOutputs.at(wantedOutput).outputHash, wantedOutput}; newRealisation.id = DrvOutput{initialOutputs.at(wantedOutput).outputHash, wantedOutput};
newRealisation.signatures.clear();
signRealisation(newRealisation);
worker.store.registerDrvOutput(newRealisation); worker.store.registerDrvOutput(newRealisation);
} else { } else {
// If we don't have a realisation, then it must mean that something // If we don't have a realisation, then it must mean that something
@ -1243,9 +1250,12 @@ OutputPathMap DerivationGoal::queryDerivationOutputMap()
void DerivationGoal::checkPathValidity() void DerivationGoal::checkPathValidity()
{ {
bool checkHash = buildMode == bmRepair; bool checkHash = buildMode == bmRepair;
auto wantedOutputsLeft = wantedOutputs;
for (auto & i : queryPartialDerivationOutputMap()) { for (auto & i : queryPartialDerivationOutputMap()) {
InitialOutput & info = initialOutputs.at(i.first); InitialOutput & info = initialOutputs.at(i.first);
info.wanted = wantOutput(i.first, wantedOutputs); info.wanted = wantOutput(i.first, wantedOutputs);
if (info.wanted)
wantedOutputsLeft.erase(i.first);
if (i.second) { if (i.second) {
auto outputPath = *i.second; auto outputPath = *i.second;
info.known = { info.known = {
@ -1267,6 +1277,13 @@ void DerivationGoal::checkPathValidity()
} }
} }
} }
// If we requested all the outputs via the empty set, we are always fine.
// If we requested specific elements, the loop above removes all the valid
// ones, so any that are left must be invalid.
if (!wantedOutputsLeft.empty())
throw Error("derivation '%s' does not have wanted outputs %s",
worker.store.printStorePath(drvPath),
concatStringsSep(", ", quoteStrings(wantedOutputsLeft)));
} }


@ -180,6 +180,9 @@ struct DerivationGoal : public Goal
/* Open a log file and a pipe to it. */ /* Open a log file and a pipe to it. */
Path openLogFile(); Path openLogFile();
/* Sign the newly built realisation if the store allows it */
virtual void signRealisation(Realisation&) {}
/* Close the log file. */ /* Close the log file. */
void closeLogFile(); void closeLogFile();


@ -0,0 +1,95 @@
#include "drv-output-substitution-goal.hh"
#include "worker.hh"
#include "substitution-goal.hh"
namespace nix {
DrvOutputSubstitutionGoal::DrvOutputSubstitutionGoal(const DrvOutput& id, Worker & worker, RepairFlag repair, std::optional<ContentAddress> ca)
: Goal(worker)
, id(id)
{
state = &DrvOutputSubstitutionGoal::init;
name = fmt("substitution of '%s'", id.to_string());
trace("created");
}
void DrvOutputSubstitutionGoal::init()
{
trace("init");
subs = settings.useSubstitutes ? getDefaultSubstituters() : std::list<ref<Store>>();
tryNext();
}
void DrvOutputSubstitutionGoal::tryNext()
{
trace("Trying next substituter");
if (subs.size() == 0) {
/* None left. Terminate this goal and let someone else deal
with it. */
debug("drv output '%s' is required, but there is no substituter that can provide it", id.to_string());
/* Hack: don't indicate failure if there were no substituters.
In that case the calling derivation should just do a
build. */
amDone(substituterFailed ? ecFailed : ecNoSubstituters);
if (substituterFailed) {
worker.failedSubstitutions++;
worker.updateProgress();
}
return;
}
auto sub = subs.front();
subs.pop_front();
// FIXME: Make async
outputInfo = sub->queryRealisation(id);
if (!outputInfo) {
tryNext();
return;
}
addWaitee(worker.makePathSubstitutionGoal(outputInfo->outPath));
if (waitees.empty()) outPathValid();
else state = &DrvOutputSubstitutionGoal::outPathValid;
}
void DrvOutputSubstitutionGoal::outPathValid()
{
assert(outputInfo);
trace("Output path substituted");
if (nrFailed > 0) {
debug("The output path of the derivation output '%s' could not be substituted", id.to_string());
amDone(nrNoSubstituters > 0 || nrIncompleteClosure > 0 ? ecIncompleteClosure : ecFailed);
return;
}
worker.store.registerDrvOutput(*outputInfo);
finished();
}
void DrvOutputSubstitutionGoal::finished()
{
trace("finished");
amDone(ecSuccess);
}
string DrvOutputSubstitutionGoal::key()
{
/* "a$" ensures substitution goals happen before derivation
goals. */
return "a$" + std::string(id.to_string());
}
void DrvOutputSubstitutionGoal::work()
{
(this->*state)();
}
}


@ -0,0 +1,50 @@
#pragma once
#include "store-api.hh"
#include "goal.hh"
#include "realisation.hh"
namespace nix {
class Worker;
// Substitution of a derivation output.
// This is done in three steps:
// 1. Fetch the output info from a substituter
// 2. Substitute the corresponding output path
// 3. Register the output info
class DrvOutputSubstitutionGoal : public Goal {
private:
// The drv output we're trying to substitue
DrvOutput id;
// The realisation corresponding to the given output id.
// Will be filled once we can get it.
std::optional<Realisation> outputInfo;
/* The remaining substituters. */
std::list<ref<Store>> subs;
/* Whether a substituter failed. */
bool substituterFailed = false;
public:
DrvOutputSubstitutionGoal(const DrvOutput& id, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
typedef void (DrvOutputSubstitutionGoal::*GoalState)();
GoalState state;
void init();
void tryNext();
void outPathValid();
void finished();
void timedOut(Error && ex) override { abort(); };
string key() override;
void work() override;
};
}


@ -15,7 +15,7 @@ void Store::buildPaths(const std::vector<StorePathWithOutputs> & drvPaths, Build
if (path.path.isDerivation()) if (path.path.isDerivation())
goals.insert(worker.makeDerivationGoal(path.path, path.outputs, buildMode)); goals.insert(worker.makeDerivationGoal(path.path, path.outputs, buildMode));
else else
goals.insert(worker.makeSubstitutionGoal(path.path, buildMode == bmRepair ? Repair : NoRepair)); goals.insert(worker.makePathSubstitutionGoal(path.path, buildMode == bmRepair ? Repair : NoRepair));
} }
worker.run(goals); worker.run(goals);
@ -31,7 +31,7 @@ void Store::buildPaths(const std::vector<StorePathWithOutputs> & drvPaths, Build
} }
if (i->exitCode != Goal::ecSuccess) { if (i->exitCode != Goal::ecSuccess) {
if (auto i2 = dynamic_cast<DerivationGoal *>(i.get())) failed.insert(i2->drvPath); if (auto i2 = dynamic_cast<DerivationGoal *>(i.get())) failed.insert(i2->drvPath);
else if (auto i2 = dynamic_cast<SubstitutionGoal *>(i.get())) failed.insert(i2->storePath); else if (auto i2 = dynamic_cast<PathSubstitutionGoal *>(i.get())) failed.insert(i2->storePath);
} }
} }
@ -90,7 +90,7 @@ void Store::ensurePath(const StorePath & path)
if (isValidPath(path)) return; if (isValidPath(path)) return;
Worker worker(*this); Worker worker(*this);
GoalPtr goal = worker.makeSubstitutionGoal(path); GoalPtr goal = worker.makePathSubstitutionGoal(path);
Goals goals = {goal}; Goals goals = {goal};
worker.run(goals); worker.run(goals);
@ -108,7 +108,7 @@ void Store::ensurePath(const StorePath & path)
void LocalStore::repairPath(const StorePath & path) void LocalStore::repairPath(const StorePath & path)
{ {
Worker worker(*this); Worker worker(*this);
GoalPtr goal = worker.makeSubstitutionGoal(path, Repair); GoalPtr goal = worker.makePathSubstitutionGoal(path, Repair);
Goals goals = {goal}; Goals goals = {goal};
worker.run(goals); worker.run(goals);


@ -287,7 +287,7 @@ bool LocalDerivationGoal::cleanupDecideWhetherDiskFull()
So instead, check if the disk is (nearly) full now. If So instead, check if the disk is (nearly) full now. If
so, we don't mark this build as a permanent failure. */ so, we don't mark this build as a permanent failure. */
#if HAVE_STATVFS #if HAVE_STATVFS
{ {
auto & localStore = getLocalStore(); auto & localStore = getLocalStore();
uint64_t required = 8ULL * 1024 * 1024; // FIXME: make configurable uint64_t required = 8ULL * 1024 * 1024; // FIXME: make configurable
struct statvfs st; struct statvfs st;
@ -297,7 +297,7 @@ bool LocalDerivationGoal::cleanupDecideWhetherDiskFull()
if (statvfs(tmpDir.c_str(), &st) == 0 && if (statvfs(tmpDir.c_str(), &st) == 0 &&
(uint64_t) st.f_bavail * st.f_bsize < required) (uint64_t) st.f_bavail * st.f_bsize < required)
diskFull = true; diskFull = true;
} }
#endif #endif
deleteTmpDir(false); deleteTmpDir(false);
@ -1703,18 +1703,18 @@ void LocalDerivationGoal::runChild()
network, so give them access to /etc/resolv.conf and so network, so give them access to /etc/resolv.conf and so
on. */ on. */
if (derivationIsImpure(derivationType)) { if (derivationIsImpure(derivationType)) {
ss.push_back("/etc/resolv.conf");
// Only use nss functions to resolve hosts and // Only use nss functions to resolve hosts and
// services. Dont use it for anything else that may // services. Dont use it for anything else that may
// be configured for this system. This limits the // be configured for this system. This limits the
// potential impurities introduced in fixed-outputs. // potential impurities introduced in fixed-outputs.
writeFile(chrootRootDir + "/etc/nsswitch.conf", "hosts: files dns\nservices: files\n"); writeFile(chrootRootDir + "/etc/nsswitch.conf", "hosts: files dns\nservices: files\n");
ss.push_back("/etc/services"); /* N.B. it is realistic that these paths might not exist. It
ss.push_back("/etc/hosts"); happens when testing Nix building fixed-output derivations
if (pathExists("/var/run/nscd/socket")) within a pure derivation. */
ss.push_back("/var/run/nscd/socket"); for (auto & path : { "/etc/resolv.conf", "/etc/services", "/etc/hosts", "/var/run/nscd/socket" })
if (pathExists(path))
ss.push_back(path);
} }
for (auto & i : ss) dirsInChroot.emplace(i, i); for (auto & i : ss) dirsInChroot.emplace(i, i);
@ -2620,13 +2620,22 @@ void LocalDerivationGoal::registerOutputs()
but it's fine to do in all cases. */ but it's fine to do in all cases. */
if (settings.isExperimentalFeatureEnabled("ca-derivations")) { if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
for (auto& [outputName, newInfo] : infos) for (auto& [outputName, newInfo] : infos) {
worker.store.registerDrvOutput(Realisation{ auto thisRealisation = Realisation{
.id = DrvOutput{initialOutputs.at(outputName).outputHash, outputName}, .id = DrvOutput{initialOutputs.at(outputName).outputHash,
.outPath = newInfo.path}); outputName},
.outPath = newInfo.path};
signRealisation(thisRealisation);
worker.store.registerDrvOutput(thisRealisation);
}
} }
} }
void LocalDerivationGoal::signRealisation(Realisation & realisation)
{
getLocalStore().signRealisation(realisation);
}
void LocalDerivationGoal::checkOutputs(const std::map<Path, ValidPathInfo> & outputs) void LocalDerivationGoal::checkOutputs(const std::map<Path, ValidPathInfo> & outputs)
{ {


@ -161,6 +161,8 @@ struct LocalDerivationGoal : public DerivationGoal
as valid. */ as valid. */
void registerOutputs() override; void registerOutputs() override;
void signRealisation(Realisation &) override;
/* Check that an output meets the requirements specified by the /* Check that an output meets the requirements specified by the
'outputChecks' attribute (or the legacy 'outputChecks' attribute (or the legacy
'{allowed,disallowed}{References,Requisites}' attributes). */ '{allowed,disallowed}{References,Requisites}' attributes). */


@ -5,20 +5,20 @@
namespace nix { namespace nix {
SubstitutionGoal::SubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair, std::optional<ContentAddress> ca) PathSubstitutionGoal::PathSubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair, std::optional<ContentAddress> ca)
: Goal(worker) : Goal(worker)
, storePath(storePath) , storePath(storePath)
, repair(repair) , repair(repair)
, ca(ca) , ca(ca)
{ {
state = &SubstitutionGoal::init; state = &PathSubstitutionGoal::init;
name = fmt("substitution of '%s'", worker.store.printStorePath(this->storePath)); name = fmt("substitution of '%s'", worker.store.printStorePath(this->storePath));
trace("created"); trace("created");
maintainExpectedSubstitutions = std::make_unique<MaintainCount<uint64_t>>(worker.expectedSubstitutions); maintainExpectedSubstitutions = std::make_unique<MaintainCount<uint64_t>>(worker.expectedSubstitutions);
} }
SubstitutionGoal::~SubstitutionGoal() PathSubstitutionGoal::~PathSubstitutionGoal()
{ {
try { try {
if (thr.joinable()) { if (thr.joinable()) {
@ -32,13 +32,13 @@ SubstitutionGoal::~SubstitutionGoal()
} }
void SubstitutionGoal::work() void PathSubstitutionGoal::work()
{ {
(this->*state)(); (this->*state)();
} }
void SubstitutionGoal::init() void PathSubstitutionGoal::init()
{ {
trace("init"); trace("init");
@ -59,7 +59,7 @@ void SubstitutionGoal::init()
} }
void SubstitutionGoal::tryNext() void PathSubstitutionGoal::tryNext()
{ {
trace("trying next substituter"); trace("trying next substituter");
@ -145,7 +145,7 @@ void SubstitutionGoal::tryNext()
/* Bail out early if this substituter lacks a valid /* Bail out early if this substituter lacks a valid
signature. LocalStore::addToStore() also checks for this, but signature. LocalStore::addToStore() also checks for this, but
only after we've downloaded the path. */ only after we've downloaded the path. */
if (!sub->isTrusted && worker.store.pathInfoIsTrusted(*info)) if (!sub->isTrusted && worker.store.pathInfoIsUntrusted(*info))
{ {
warn("substituter '%s' does not have a valid signature for path '%s'", warn("substituter '%s' does not have a valid signature for path '%s'",
sub->getUri(), worker.store.printStorePath(storePath)); sub->getUri(), worker.store.printStorePath(storePath));
@ -156,16 +156,16 @@ void SubstitutionGoal::tryNext()
/* To maintain the closure invariant, we first have to realise the /* To maintain the closure invariant, we first have to realise the
paths referenced by this one. */ paths referenced by this one. */
for (auto & i : info->references) for (auto & i : info->references)
addWaitee(worker.makeSubstitutionGoal(i)); addWaitee(worker.makePathSubstitutionGoal(i));
if (waitees.empty()) /* to prevent hang (no wake-up event) */ if (waitees.empty()) /* to prevent hang (no wake-up event) */
referencesValid(); referencesValid();
else else
state = &SubstitutionGoal::referencesValid; state = &PathSubstitutionGoal::referencesValid;
} }
void SubstitutionGoal::referencesValid() void PathSubstitutionGoal::referencesValid()
{ {
trace("all references realised"); trace("all references realised");
@ -178,12 +178,12 @@ void SubstitutionGoal::referencesValid()
for (auto & i : info->references) for (auto & i : info->references)
assert(worker.store.isValidPath(i)); assert(worker.store.isValidPath(i));
state = &SubstitutionGoal::tryToRun; state = &PathSubstitutionGoal::tryToRun;
worker.wakeUp(shared_from_this()); worker.wakeUp(shared_from_this());
} }
void SubstitutionGoal::tryToRun() void PathSubstitutionGoal::tryToRun()
{ {
trace("trying to run"); trace("trying to run");
@ -222,11 +222,11 @@ void SubstitutionGoal::tryToRun()
worker.childStarted(shared_from_this(), {outPipe.readSide.get()}, true, false); worker.childStarted(shared_from_this(), {outPipe.readSide.get()}, true, false);
state = &SubstitutionGoal::finished; state = &PathSubstitutionGoal::finished;
} }
void SubstitutionGoal::finished() void PathSubstitutionGoal::finished()
{ {
trace("substitute finished"); trace("substitute finished");
@ -250,7 +250,7 @@ void SubstitutionGoal::finished()
} }
/* Try the next substitute. */ /* Try the next substitute. */
state = &SubstitutionGoal::tryNext; state = &PathSubstitutionGoal::tryNext;
worker.wakeUp(shared_from_this()); worker.wakeUp(shared_from_this());
return; return;
} }
@ -279,12 +279,12 @@ void SubstitutionGoal::finished()
} }
void SubstitutionGoal::handleChildOutput(int fd, const string & data) void PathSubstitutionGoal::handleChildOutput(int fd, const string & data)
{ {
} }
void SubstitutionGoal::handleEOF(int fd) void PathSubstitutionGoal::handleEOF(int fd)
{ {
if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this()); if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this());
} }


@ -8,7 +8,7 @@ namespace nix {
class Worker; class Worker;
struct SubstitutionGoal : public Goal struct PathSubstitutionGoal : public Goal
{ {
/* The store path that should be realised through a substitute. */ /* The store path that should be realised through a substitute. */
StorePath storePath; StorePath storePath;
@ -47,14 +47,15 @@ struct SubstitutionGoal : public Goal
std::unique_ptr<MaintainCount<uint64_t>> maintainExpectedSubstitutions, std::unique_ptr<MaintainCount<uint64_t>> maintainExpectedSubstitutions,
maintainRunningSubstitutions, maintainExpectedNar, maintainExpectedDownload; maintainRunningSubstitutions, maintainExpectedNar, maintainExpectedDownload;
typedef void (SubstitutionGoal::*GoalState)(); typedef void (PathSubstitutionGoal::*GoalState)();
GoalState state; GoalState state;
/* Content address for recomputing store path */ /* Content address for recomputing store path */
std::optional<ContentAddress> ca; std::optional<ContentAddress> ca;
SubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt); public:
~SubstitutionGoal(); PathSubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
~PathSubstitutionGoal();
void timedOut(Error && ex) override { abort(); }; void timedOut(Error && ex) override { abort(); };


@ -1,6 +1,7 @@
#include "machines.hh" #include "machines.hh"
#include "worker.hh" #include "worker.hh"
#include "substitution-goal.hh" #include "substitution-goal.hh"
#include "drv-output-substitution-goal.hh"
#include "local-derivation-goal.hh" #include "local-derivation-goal.hh"
#include "hook-instance.hh" #include "hook-instance.hh"
@ -78,20 +79,32 @@ std::shared_ptr<DerivationGoal> Worker::makeBasicDerivationGoal(const StorePath
} }
std::shared_ptr<SubstitutionGoal> Worker::makeSubstitutionGoal(const StorePath & path, RepairFlag repair, std::optional<ContentAddress> ca) std::shared_ptr<PathSubstitutionGoal> Worker::makePathSubstitutionGoal(const StorePath & path, RepairFlag repair, std::optional<ContentAddress> ca)
{ {
std::weak_ptr<SubstitutionGoal> & goal_weak = substitutionGoals[path]; std::weak_ptr<PathSubstitutionGoal> & goal_weak = substitutionGoals[path];
auto goal = goal_weak.lock(); // FIXME auto goal = goal_weak.lock(); // FIXME
if (!goal) { if (!goal) {
goal = std::make_shared<SubstitutionGoal>(path, *this, repair, ca); goal = std::make_shared<PathSubstitutionGoal>(path, *this, repair, ca);
goal_weak = goal; goal_weak = goal;
wakeUp(goal); wakeUp(goal);
} }
return goal; return goal;
} }
template<typename G> std::shared_ptr<DrvOutputSubstitutionGoal> Worker::makeDrvOutputSubstitutionGoal(const DrvOutput& id, RepairFlag repair, std::optional<ContentAddress> ca)
static void removeGoal(std::shared_ptr<G> goal, std::map<StorePath, std::weak_ptr<G>> & goalMap) {
std::weak_ptr<DrvOutputSubstitutionGoal> & goal_weak = drvOutputSubstitutionGoals[id];
auto goal = goal_weak.lock(); // FIXME
if (!goal) {
goal = std::make_shared<DrvOutputSubstitutionGoal>(id, *this, repair, ca);
goal_weak = goal;
wakeUp(goal);
}
return goal;
}
template<typename K, typename G>
static void removeGoal(std::shared_ptr<G> goal, std::map<K, std::weak_ptr<G>> & goalMap)
{ {
/* !!! inefficient */ /* !!! inefficient */
for (auto i = goalMap.begin(); for (auto i = goalMap.begin();
@ -109,8 +122,10 @@ void Worker::removeGoal(GoalPtr goal)
{ {
if (auto drvGoal = std::dynamic_pointer_cast<DerivationGoal>(goal)) if (auto drvGoal = std::dynamic_pointer_cast<DerivationGoal>(goal))
nix::removeGoal(drvGoal, derivationGoals); nix::removeGoal(drvGoal, derivationGoals);
else if (auto subGoal = std::dynamic_pointer_cast<SubstitutionGoal>(goal)) else if (auto subGoal = std::dynamic_pointer_cast<PathSubstitutionGoal>(goal))
nix::removeGoal(subGoal, substitutionGoals); nix::removeGoal(subGoal, substitutionGoals);
else if (auto subGoal = std::dynamic_pointer_cast<DrvOutputSubstitutionGoal>(goal))
nix::removeGoal(subGoal, drvOutputSubstitutionGoals);
else else
assert(false); assert(false);
if (topGoals.find(goal) != topGoals.end()) { if (topGoals.find(goal) != topGoals.end()) {
@ -217,7 +232,7 @@ void Worker::run(const Goals & _topGoals)
topGoals.insert(i); topGoals.insert(i);
if (auto goal = dynamic_cast<DerivationGoal *>(i.get())) { if (auto goal = dynamic_cast<DerivationGoal *>(i.get())) {
topPaths.push_back({goal->drvPath, goal->wantedOutputs}); topPaths.push_back({goal->drvPath, goal->wantedOutputs});
} else if (auto goal = dynamic_cast<SubstitutionGoal *>(i.get())) { } else if (auto goal = dynamic_cast<PathSubstitutionGoal *>(i.get())) {
topPaths.push_back({goal->storePath}); topPaths.push_back({goal->storePath});
} }
} }
@ -471,7 +486,10 @@ void Worker::markContentsGood(const StorePath & path)
} }
GoalPtr upcast_goal(std::shared_ptr<SubstitutionGoal> subGoal) { GoalPtr upcast_goal(std::shared_ptr<PathSubstitutionGoal> subGoal) {
return subGoal;
}
GoalPtr upcast_goal(std::shared_ptr<DrvOutputSubstitutionGoal> subGoal) {
return subGoal; return subGoal;
} }


@ -4,6 +4,7 @@
#include "lock.hh" #include "lock.hh"
#include "store-api.hh" #include "store-api.hh"
#include "goal.hh" #include "goal.hh"
#include "realisation.hh"
#include <future> #include <future>
#include <thread> #include <thread>
@ -12,18 +13,20 @@ namespace nix {
/* Forward definition. */ /* Forward definition. */
struct DerivationGoal; struct DerivationGoal;
struct SubstitutionGoal; struct PathSubstitutionGoal;
class DrvOutputSubstitutionGoal;
/* Workaround for not being able to declare a something like /* Workaround for not being able to declare a something like
class SubstitutionGoal : public Goal; class PathSubstitutionGoal : public Goal;
even when Goal is a complete type. even when Goal is a complete type.
This is still a static cast. The purpose of exporting it is to define it in This is still a static cast. The purpose of exporting it is to define it in
a place where `SubstitutionGoal` is concrete, and use it in a place where it a place where `PathSubstitutionGoal` is concrete, and use it in a place where it
is opaque. */ is opaque. */
GoalPtr upcast_goal(std::shared_ptr<SubstitutionGoal> subGoal); GoalPtr upcast_goal(std::shared_ptr<PathSubstitutionGoal> subGoal);
GoalPtr upcast_goal(std::shared_ptr<DrvOutputSubstitutionGoal> subGoal);
typedef std::chrono::time_point<std::chrono::steady_clock> steady_time_point; typedef std::chrono::time_point<std::chrono::steady_clock> steady_time_point;
@ -72,7 +75,8 @@ private:
/* Maps used to prevent multiple instantiations of a goal for the /* Maps used to prevent multiple instantiations of a goal for the
same derivation / path. */ same derivation / path. */
std::map<StorePath, std::weak_ptr<DerivationGoal>> derivationGoals; std::map<StorePath, std::weak_ptr<DerivationGoal>> derivationGoals;
std::map<StorePath, std::weak_ptr<SubstitutionGoal>> substitutionGoals; std::map<StorePath, std::weak_ptr<PathSubstitutionGoal>> substitutionGoals;
std::map<DrvOutput, std::weak_ptr<DrvOutputSubstitutionGoal>> drvOutputSubstitutionGoals;
/* Goals waiting for busy paths to be unlocked. */ /* Goals waiting for busy paths to be unlocked. */
WeakGoals waitingForAnyGoal; WeakGoals waitingForAnyGoal;
@ -146,7 +150,8 @@ public:
const StringSet & wantedOutputs, BuildMode buildMode = bmNormal); const StringSet & wantedOutputs, BuildMode buildMode = bmNormal);
/* substitution goal */ /* substitution goal */
std::shared_ptr<SubstitutionGoal> makeSubstitutionGoal(const StorePath & storePath, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt); std::shared_ptr<PathSubstitutionGoal> makePathSubstitutionGoal(const StorePath & storePath, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
std::shared_ptr<DrvOutputSubstitutionGoal> makeDrvOutputSubstitutionGoal(const DrvOutput & id, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
/* Remove a dead goal. */ /* Remove a dead goal. */
void removeGoal(GoalPtr goal); void removeGoal(GoalPtr goal);


@ -6,6 +6,7 @@ create table if not exists Realisations (
drvPath text not null, drvPath text not null,
outputName text not null, -- symbolic output id, usually "out" outputName text not null, -- symbolic output id, usually "out"
outputPath integer not null, outputPath integer not null,
signatures text, -- space-separated list
primary key (drvPath, outputName), primary key (drvPath, outputName),
foreign key (outputPath) references ValidPaths(id) on delete cascade foreign key (outputPath) references ValidPaths(id) on delete cascade
); );
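The added `signatures` column keeps a space-separated signature list per registered realisation; a hypothetical peek at the local database (the table is only populated with the `ca-derivations` experimental feature enabled):

    # hypothetical: list registered realisations and their signatures
    sqlite3 /nix/var/nix/db/db.sqlite 'select drvPath, outputName, signatures from Realisations;'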


@ -579,7 +579,10 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
auto res = store->buildDerivation(drvPath, drv, buildMode); auto res = store->buildDerivation(drvPath, drv, buildMode);
logger->stopWork(); logger->stopWork();
to << res.status << res.errorMsg; to << res.status << res.errorMsg;
if (GET_PROTOCOL_MINOR(clientVersion) >= 0xc) { if (GET_PROTOCOL_MINOR(clientVersion) >= 29) {
to << res.timesBuilt << res.isNonDeterministic << res.startTime << res.stopTime;
}
if (GET_PROTOCOL_MINOR(clientVersion) >= 28) {
worker_proto::write(*store, to, res.builtOutputs); worker_proto::write(*store, to, res.builtOutputs);
} }
break; break;


@ -52,7 +52,7 @@ struct DerivationOutput
DerivationOutputCAFloating, DerivationOutputCAFloating,
DerivationOutputDeferred DerivationOutputDeferred
> output; > output;
std::optional<HashType> hashAlgoOpt(const Store & store) const;
/* Note, when you use this function you should make sure that you're passing /* Note, when you use this function you should make sure that you're passing
the right derivation name. When in doubt, you should use the safer the right derivation name. When in doubt, you should use the safer
interface provided by BasicDerivation::outputsAndOptPaths */ interface provided by BasicDerivation::outputsAndOptPaths */


@ -81,7 +81,7 @@ void loadConfFile()
/* We only want to send overrides to the daemon, i.e. stuff from /* We only want to send overrides to the daemon, i.e. stuff from
~/.nix/nix.conf or the command line. */ ~/.nix/nix.conf or the command line. */
globalConfig.resetOverriden(); globalConfig.resetOverridden();
auto files = settings.nixUserConfFiles; auto files = settings.nixUserConfFiles;
for (auto file = files.rbegin(); file != files.rend(); file++) { for (auto file = files.rbegin(); file != files.rend(); file++) {


@ -206,7 +206,10 @@ public:
Setting<std::string> builders{ Setting<std::string> builders{
this, "@" + nixConfDir + "/machines", "builders", this, "@" + nixConfDir + "/machines", "builders",
"A semicolon-separated list of build machines, in the format of `nix.machines`."}; R"(
A semicolon-separated list of build machines.
For the exact format and examples, see [the manual chapter on remote builds](../advanced-topics/distributed-builds.md)
)"};
Setting<bool> buildersUseSubstitutes{ Setting<bool> buildersUseSubstitutes{
this, false, "builders-use-substitutes", this, false, "builders-use-substitutes",


@ -15,6 +15,7 @@ struct LegacySSHStoreConfig : virtual StoreConfig
using StoreConfig::StoreConfig; using StoreConfig::StoreConfig;
const Setting<int> maxConnections{(StoreConfig*) this, 1, "max-connections", "maximum number of concurrent SSH connections"}; const Setting<int> maxConnections{(StoreConfig*) this, 1, "max-connections", "maximum number of concurrent SSH connections"};
const Setting<Path> sshKey{(StoreConfig*) this, "", "ssh-key", "path to an SSH private key"}; const Setting<Path> sshKey{(StoreConfig*) this, "", "ssh-key", "path to an SSH private key"};
const Setting<std::string> sshPublicHostKey{(StoreConfig*) this, "", "base64-ssh-public-host-key", "The public half of the host's SSH key"};
const Setting<bool> compress{(StoreConfig*) this, false, "compress", "whether to compress the connection"}; const Setting<bool> compress{(StoreConfig*) this, false, "compress", "whether to compress the connection"};
const Setting<Path> remoteProgram{(StoreConfig*) this, "nix-store", "remote-program", "path to the nix-store executable on the remote system"}; const Setting<Path> remoteProgram{(StoreConfig*) this, "nix-store", "remote-program", "path to the nix-store executable on the remote system"};
const Setting<std::string> remoteStore{(StoreConfig*) this, "", "remote-store", "URI of the store on the remote system"}; const Setting<std::string> remoteStore{(StoreConfig*) this, "", "remote-store", "URI of the store on the remote system"};
@ -59,6 +60,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
, master( , master(
host, host,
sshKey, sshKey,
sshPublicHostKey,
// Use SSH master only if using more than 1 connection. // Use SSH master only if using more than 1 connection.
connections->capacity() > 1, connections->capacity() > 1,
compress, compress,


@ -2,6 +2,8 @@
#include "globals.hh" #include "globals.hh"
#include "nar-info-disk-cache.hh" #include "nar-info-disk-cache.hh"
#include <atomic>
namespace nix { namespace nix {
struct LocalBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig struct LocalBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig
@ -50,7 +52,8 @@ protected:
const std::string & mimeType) override const std::string & mimeType) override
{ {
auto path2 = binaryCacheDir + "/" + path; auto path2 = binaryCacheDir + "/" + path;
Path tmp = path2 + ".tmp." + std::to_string(getpid()); static std::atomic<int> counter{0};
Path tmp = fmt("%s.tmp.%d.%d", path2, getpid(), ++counter);
AutoDelete del(tmp, false); AutoDelete del(tmp, false);
StreamToSourceAdapter source(istream); StreamToSourceAdapter source(istream);
writeFile(tmp, source); writeFile(tmp, source);
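The counter in the new temporary-file name exists because a single process may write several cache files concurrently, so the pid alone is no longer unique. A minimal standalone sketch of the same uniqueness scheme (std::to_string stands in for the fmt helper; names are illustrative):

```cpp
#include <atomic>
#include <string>
#include <unistd.h>

// Build a temporary name that is unique per process *and* per call, so
// concurrent uploads from one process never collide on the same ".tmp" path.
std::string makeTmpName(const std::string & path)
{
    static std::atomic<int> counter{0};
    return path + ".tmp." + std::to_string(getpid()) + "." + std::to_string(++counter);
}
```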


@ -310,13 +310,13 @@ LocalStore::LocalStore(const Params & params)
if (settings.isExperimentalFeatureEnabled("ca-derivations")) { if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
state->stmts->RegisterRealisedOutput.create(state->db, state->stmts->RegisterRealisedOutput.create(state->db,
R"( R"(
insert or replace into Realisations (drvPath, outputName, outputPath) insert or replace into Realisations (drvPath, outputName, outputPath, signatures)
values (?, ?, (select id from ValidPaths where path = ?)) values (?, ?, (select id from ValidPaths where path = ?), ?)
; ;
)"); )");
state->stmts->QueryRealisedOutput.create(state->db, state->stmts->QueryRealisedOutput.create(state->db,
R"( R"(
select Output.path from Realisations select Output.path, Realisations.signatures from Realisations
inner join ValidPaths as Output on Output.id = Realisations.outputPath inner join ValidPaths as Output on Output.id = Realisations.outputPath
where drvPath = ? and outputName = ? where drvPath = ? and outputName = ?
; ;
@ -652,6 +652,14 @@ void LocalStore::checkDerivationOutputs(const StorePath & drvPath, const Derivat
} }
} }
void LocalStore::registerDrvOutput(const Realisation & info, CheckSigsFlag checkSigs)
{
settings.requireExperimentalFeature("ca-derivations");
if (checkSigs == NoCheckSigs || !realisationIsUntrusted(info))
registerDrvOutput(info);
else
throw Error("cannot register realisation '%s' because it lacks a valid signature", info.outPath.to_string());
}
void LocalStore::registerDrvOutput(const Realisation & info) void LocalStore::registerDrvOutput(const Realisation & info)
{ {
@ -662,6 +670,7 @@ void LocalStore::registerDrvOutput(const Realisation & info)
(info.id.strHash()) (info.id.strHash())
(info.id.outputName) (info.id.outputName)
(printStorePath(info.outPath)) (printStorePath(info.outPath))
(concatStringsSep(" ", info.signatures))
.exec(); .exec();
}); });
} }
@ -1108,15 +1117,20 @@ const PublicKeys & LocalStore::getPublicKeys()
return *state->publicKeys; return *state->publicKeys;
} }
bool LocalStore::pathInfoIsTrusted(const ValidPathInfo & info) bool LocalStore::pathInfoIsUntrusted(const ValidPathInfo & info)
{ {
return requireSigs && !info.checkSignatures(*this, getPublicKeys()); return requireSigs && !info.checkSignatures(*this, getPublicKeys());
} }
bool LocalStore::realisationIsUntrusted(const Realisation & realisation)
{
return requireSigs && !realisation.checkSignatures(getPublicKeys());
}
void LocalStore::addToStore(const ValidPathInfo & info, Source & source, void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
RepairFlag repair, CheckSigsFlag checkSigs) RepairFlag repair, CheckSigsFlag checkSigs)
{ {
if (checkSigs && pathInfoIsTrusted(info)) if (checkSigs && pathInfoIsUntrusted(info))
throw Error("cannot add path '%s' because it lacks a valid signature", printStorePath(info.path)); throw Error("cannot add path '%s' because it lacks a valid signature", printStorePath(info.path));
addTempRoot(info.path); addTempRoot(info.path);
@ -1630,6 +1644,18 @@ void LocalStore::addSignatures(const StorePath & storePath, const StringSet & si
} }
void LocalStore::signRealisation(Realisation & realisation)
{
// FIXME: keep secret keys in memory.
auto secretKeyFiles = settings.secretKeyFiles;
for (auto & secretKeyFile : secretKeyFiles.get()) {
SecretKey secretKey(readFile(secretKeyFile));
realisation.sign(secretKey);
}
}
void LocalStore::signPathInfo(ValidPathInfo & info) void LocalStore::signPathInfo(ValidPathInfo & info)
{ {
// FIXME: keep secret keys in memory. // FIXME: keep secret keys in memory.
@ -1667,8 +1693,9 @@ std::optional<const Realisation> LocalStore::queryRealisation(
if (!use.next()) if (!use.next())
return std::nullopt; return std::nullopt;
auto outputPath = parseStorePath(use.getStr(0)); auto outputPath = parseStorePath(use.getStr(0));
return Ret{ auto signatures = tokenizeString<StringSet>(use.getStr(1));
Realisation{.id = id, .outPath = outputPath}}; return Ret{Realisation{
.id = id, .outPath = outputPath, .signatures = signatures}};
}); });
} }
} // namespace nix } // namespace nix
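The new signatures column stores a plain space-separated list; registration joins the set with concatStringsSep(" ", ...) and queryRealisation splits it back with tokenizeString&lt;StringSet&gt;. A small stand-alone sketch of that round trip using only the standard library (the signature strings are made up):

```cpp
#include <iostream>
#include <set>
#include <sstream>
#include <string>

int main()
{
    // Hypothetical signature values.
    std::set<std::string> sigs = {"cache-1:AAAA...", "cache-2:BBBB..."};

    // Store: join with single spaces (what concatStringsSep(" ", ...) does).
    std::string column;
    for (auto & s : sigs) column += (column.empty() ? "" : " ") + s;

    // Load: split on whitespace (what tokenizeString<StringSet> does).
    std::set<std::string> parsed;
    std::istringstream in(column);
    for (std::string tok; in >> tok; ) parsed.insert(tok);

    std::cout << (parsed == sigs ? "round-trip ok" : "mismatch") << "\n";
}
```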


@ -136,7 +136,8 @@ public:
void querySubstitutablePathInfos(const StorePathCAMap & paths, void querySubstitutablePathInfos(const StorePathCAMap & paths,
SubstitutablePathInfos & infos) override; SubstitutablePathInfos & infos) override;
bool pathInfoIsTrusted(const ValidPathInfo &) override; bool pathInfoIsUntrusted(const ValidPathInfo &) override;
bool realisationIsUntrusted(const Realisation & ) override;
void addToStore(const ValidPathInfo & info, Source & source, void addToStore(const ValidPathInfo & info, Source & source,
RepairFlag repair, CheckSigsFlag checkSigs) override; RepairFlag repair, CheckSigsFlag checkSigs) override;
@ -202,6 +203,7 @@ public:
/* Register the store path 'output' as the output named 'outputName' of /* Register the store path 'output' as the output named 'outputName' of
derivation 'deriver'. */ derivation 'deriver'. */
void registerDrvOutput(const Realisation & info) override; void registerDrvOutput(const Realisation & info) override;
void registerDrvOutput(const Realisation & info, CheckSigsFlag checkSigs) override;
void cacheDrvOutputMapping(State & state, const uint64_t deriver, const string & outputName, const StorePath & output); void cacheDrvOutputMapping(State & state, const uint64_t deriver, const string & outputName, const StorePath & output);
std::optional<const Realisation> queryRealisation(const DrvOutput&) override; std::optional<const Realisation> queryRealisation(const DrvOutput&) override;
@ -272,16 +274,19 @@ private:
bool isValidPath_(State & state, const StorePath & path); bool isValidPath_(State & state, const StorePath & path);
void queryReferrers(State & state, const StorePath & path, StorePathSet & referrers); void queryReferrers(State & state, const StorePath & path, StorePathSet & referrers);
/* Add signatures to a ValidPathInfo using the secret keys /* Add signatures to a ValidPathInfo or Realisation using the secret keys
specified by the secret-key-files option. */ specified by the secret-key-files option. */
void signPathInfo(ValidPathInfo & info); void signPathInfo(ValidPathInfo & info);
void signRealisation(Realisation &);
Path getRealStoreDir() override { return realStoreDir; } Path getRealStoreDir() override { return realStoreDir; }
void createUser(const std::string & userName, uid_t userId) override; void createUser(const std::string & userName, uid_t userId) override;
friend struct LocalDerivationGoal; friend struct LocalDerivationGoal;
friend struct PathSubstitutionGoal;
friend struct SubstitutionGoal; friend struct SubstitutionGoal;
friend struct DerivationGoal;
}; };


@ -28,7 +28,7 @@ ifeq ($(OS), SunOS)
endif endif
ifeq ($(HAVE_SECCOMP), 1) ifeq ($(HAVE_SECCOMP), 1)
libstore_LDFLAGS += -lseccomp libstore_LDFLAGS += $(LIBSECCOMP_LIBS)
endif endif
libstore_CXXFLAGS += \ libstore_CXXFLAGS += \


@ -54,9 +54,15 @@ ref<Store> Machine::openStore() const {
if (hasPrefix(storeUri, "ssh://")) { if (hasPrefix(storeUri, "ssh://")) {
storeParams["max-connections"] = "1"; storeParams["max-connections"] = "1";
storeParams["log-fd"] = "4"; storeParams["log-fd"] = "4";
}
if (hasPrefix(storeUri, "ssh://") || hasPrefix(storeUri, "ssh-ng://")) {
if (sshKey != "") if (sshKey != "")
storeParams["ssh-key"] = sshKey; storeParams["ssh-key"] = sshKey;
if (sshPublicHostKey != "")
storeParams["base64-ssh-public-host-key"] = sshPublicHostKey;
} }
{ {
auto & fs = storeParams["system-features"]; auto & fs = storeParams["system-features"];
auto append = [&](auto feats) { auto append = [&](auto feats) {


@ -22,54 +22,52 @@ void Store::computeFSClosure(const StorePathSet & startPaths,
Sync<State> state_(State{0, paths_, 0}); Sync<State> state_(State{0, paths_, 0});
std::function<void(const Path &)> enqueue; std::function<void(const StorePath &)> enqueue;
std::condition_variable done; std::condition_variable done;
enqueue = [&](const Path & path) -> void { enqueue = [&](const StorePath & path) -> void {
{ {
auto state(state_.lock()); auto state(state_.lock());
if (state->exc) return; if (state->exc) return;
if (!state->paths.insert(parseStorePath(path)).second) return; if (!state->paths.insert(path).second) return;
state->pending++; state->pending++;
} }
queryPathInfo(parseStorePath(path), {[&, pathS(path)](std::future<ref<const ValidPathInfo>> fut) { queryPathInfo(path, {[&](std::future<ref<const ValidPathInfo>> fut) {
// FIXME: calls to isValidPath() should be async // FIXME: calls to isValidPath() should be async
try { try {
auto info = fut.get(); auto info = fut.get();
auto path = parseStorePath(pathS);
if (flipDirection) { if (flipDirection) {
StorePathSet referrers; StorePathSet referrers;
queryReferrers(path, referrers); queryReferrers(path, referrers);
for (auto & ref : referrers) for (auto & ref : referrers)
if (ref != path) if (ref != path)
enqueue(printStorePath(ref)); enqueue(ref);
if (includeOutputs) if (includeOutputs)
for (auto & i : queryValidDerivers(path)) for (auto & i : queryValidDerivers(path))
enqueue(printStorePath(i)); enqueue(i);
if (includeDerivers && path.isDerivation()) if (includeDerivers && path.isDerivation())
for (auto & i : queryDerivationOutputs(path)) for (auto & i : queryDerivationOutputs(path))
if (isValidPath(i) && queryPathInfo(i)->deriver == path) if (isValidPath(i) && queryPathInfo(i)->deriver == path)
enqueue(printStorePath(i)); enqueue(i);
} else { } else {
for (auto & ref : info->references) for (auto & ref : info->references)
enqueue(printStorePath(ref)); enqueue(ref);
if (includeOutputs && path.isDerivation()) if (includeOutputs && path.isDerivation())
for (auto & i : queryDerivationOutputs(path)) for (auto & i : queryDerivationOutputs(path))
if (isValidPath(i)) enqueue(printStorePath(i)); if (isValidPath(i)) enqueue(i);
if (includeDerivers && info->deriver && isValidPath(*info->deriver)) if (includeDerivers && info->deriver && isValidPath(*info->deriver))
enqueue(printStorePath(*info->deriver)); enqueue(*info->deriver);
} }
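For intuition, here is a compact synchronous sketch of the closure computation that the code above performs asynchronously over the store (the toy reference graph and path names are made up; flipDirection would simply walk a referrer map instead of the reference map):

```cpp
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

using Graph = std::map<std::string, std::vector<std::string>>;

// Collect everything reachable from `start` by repeatedly enqueuing
// unvisited nodes, as computeFSClosure does with store paths.
std::set<std::string> closure(const Graph & refs, const std::string & start)
{
    std::set<std::string> seen;
    std::vector<std::string> todo{start};
    while (!todo.empty()) {
        auto path = todo.back(); todo.pop_back();
        if (!seen.insert(path).second) continue;   // already visited
        if (auto it = refs.find(path); it != refs.end())
            for (auto & ref : it->second) todo.push_back(ref);
    }
    return seen;
}

int main()
{
    // Hypothetical reference edges between store paths.
    Graph refs = {{"app", {"libA", "libB"}}, {"libA", {"libc"}}, {"libB", {"libc"}}};
    for (auto & p : closure(refs, "app")) std::cout << p << "\n";
}
```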
@ -89,7 +87,7 @@ void Store::computeFSClosure(const StorePathSet & startPaths,
}; };
for (auto & startPath : startPaths) for (auto & startPath : startPaths)
enqueue(printStorePath(startPath)); enqueue(startPath);
{ {
auto state(state_.lock()); auto state(state_.lock());
@ -171,13 +169,10 @@ void Store::queryMissing(const std::vector<StorePathWithOutputs> & targets,
}; };
auto checkOutput = [&]( auto checkOutput = [&](
const Path & drvPathS, ref<Derivation> drv, const Path & outPathS, ref<Sync<DrvState>> drvState_) const StorePath & drvPath, ref<Derivation> drv, const StorePath & outPath, ref<Sync<DrvState>> drvState_)
{ {
if (drvState_->lock()->done) return; if (drvState_->lock()->done) return;
auto drvPath = parseStorePath(drvPathS);
auto outPath = parseStorePath(outPathS);
SubstitutablePathInfos infos; SubstitutablePathInfos infos;
querySubstitutablePathInfos({{outPath, getDerivationCA(*drv)}}, infos); querySubstitutablePathInfos({{outPath, getDerivationCA(*drv)}}, infos);
@ -214,7 +209,7 @@ void Store::queryMissing(const std::vector<StorePathWithOutputs> & targets,
return; return;
} }
PathSet invalid; StorePathSet invalid;
/* true for regular derivations, and CA derivations for which we /* true for regular derivations, and CA derivations for which we
have a trust mapping for all wanted outputs. */ have a trust mapping for all wanted outputs. */
auto knownOutputPaths = true; auto knownOutputPaths = true;
@ -224,7 +219,7 @@ void Store::queryMissing(const std::vector<StorePathWithOutputs> & targets,
break; break;
} }
if (wantOutput(outputName, path.outputs) && !isValidPath(*pathOpt)) if (wantOutput(outputName, path.outputs) && !isValidPath(*pathOpt))
invalid.insert(printStorePath(*pathOpt)); invalid.insert(*pathOpt);
} }
if (knownOutputPaths && invalid.empty()) return; if (knownOutputPaths && invalid.empty()) return;
@ -234,7 +229,7 @@ void Store::queryMissing(const std::vector<StorePathWithOutputs> & targets,
if (knownOutputPaths && settings.useSubstitutes && parsedDrv.substitutesAllowed()) { if (knownOutputPaths && settings.useSubstitutes && parsedDrv.substitutesAllowed()) {
auto drvState = make_ref<Sync<DrvState>>(DrvState(invalid.size())); auto drvState = make_ref<Sync<DrvState>>(DrvState(invalid.size()));
for (auto & output : invalid) for (auto & output : invalid)
pool.enqueue(std::bind(checkOutput, printStorePath(path.path), drv, output, drvState)); pool.enqueue(std::bind(checkOutput, path.path, drv, output, drvState));
} else } else
mustBuildDrv(path.path, *drv); mustBuildDrv(path.path, *drv);


@ -25,27 +25,69 @@ nlohmann::json Realisation::toJSON() const {
return nlohmann::json{ return nlohmann::json{
{"id", id.to_string()}, {"id", id.to_string()},
{"outPath", outPath.to_string()}, {"outPath", outPath.to_string()},
{"signatures", signatures},
}; };
} }
Realisation Realisation::fromJSON( Realisation Realisation::fromJSON(
const nlohmann::json& json, const nlohmann::json& json,
const std::string& whence) { const std::string& whence) {
auto getField = [&](std::string fieldName) -> std::string { auto getOptionalField = [&](std::string fieldName) -> std::optional<std::string> {
auto fieldIterator = json.find(fieldName); auto fieldIterator = json.find(fieldName);
if (fieldIterator == json.end()) if (fieldIterator == json.end())
return std::nullopt;
return *fieldIterator;
};
auto getField = [&](std::string fieldName) -> std::string {
if (auto field = getOptionalField(fieldName))
return *field;
else
throw Error( throw Error(
"Drv output info file '%1%' is corrupt, missing field %2%", "Drv output info file '%1%' is corrupt, missing field %2%",
whence, fieldName); whence, fieldName);
return *fieldIterator;
}; };
StringSet signatures;
if (auto signaturesIterator = json.find("signatures"); signaturesIterator != json.end())
signatures.insert(signaturesIterator->begin(), signaturesIterator->end());
return Realisation{ return Realisation{
.id = DrvOutput::parse(getField("id")), .id = DrvOutput::parse(getField("id")),
.outPath = StorePath(getField("outPath")), .outPath = StorePath(getField("outPath")),
.signatures = signatures,
}; };
} }
std::string Realisation::fingerprint() const
{
auto serialized = toJSON();
serialized.erase("signatures");
return serialized.dump();
}
void Realisation::sign(const SecretKey & secretKey)
{
signatures.insert(secretKey.signDetached(fingerprint()));
}
bool Realisation::checkSignature(const PublicKeys & publicKeys, const std::string & sig) const
{
return verifyDetached(fingerprint(), sig, publicKeys);
}
size_t Realisation::checkSignatures(const PublicKeys & publicKeys) const
{
// FIXME: Maybe we should return `maxSigs` if the realisation corresponds to
// an input-addressed one because in that case the drv is enough to check
// it but we can't know that here.
size_t good = 0;
for (auto & sig : signatures)
if (checkSignature(publicKeys, sig))
good++;
return good;
}
StorePath RealisedPath::path() const { StorePath RealisedPath::path() const {
return std::visit([](auto && arg) { return arg.getPath(); }, raw); return std::visit([](auto && arg) { return arg.getPath(); }, raw);
} }
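The fingerprint introduced above is just the realisation's JSON with the signatures field removed, so existing signatures never influence what gets signed next. A short sketch of that serialization step using nlohmann::json (the id/outPath/signature values are invented):

```cpp
#include <nlohmann/json.hpp>
#include <iostream>

int main()
{
    // Hypothetical realisation, mirroring the fields of toJSON() above.
    nlohmann::json realisation = {
        {"id", "sha256:0000000000000000000000000000000000000000000000000000000000000000!out"},
        {"outPath", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-hello"},
        {"signatures", {"cache.example.org-1:c2lnbmF0dXJl..."}},
    };

    // What fingerprint() does: serialize everything except the signatures.
    auto fingerprint = realisation;
    fingerprint.erase("signatures");
    std::cout << fingerprint.dump() << "\n";   // this string is what gets signed
}
```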


@ -6,6 +6,7 @@
#include "path.hh" #include "path.hh"
#include <nlohmann/json_fwd.hpp> #include <nlohmann/json_fwd.hpp>
#include "comparator.hh" #include "comparator.hh"
#include "crypto.hh"
namespace nix { namespace nix {
@ -28,9 +29,16 @@ struct Realisation {
DrvOutput id; DrvOutput id;
StorePath outPath; StorePath outPath;
StringSet signatures;
nlohmann::json toJSON() const; nlohmann::json toJSON() const;
static Realisation fromJSON(const nlohmann::json& json, const std::string& whence); static Realisation fromJSON(const nlohmann::json& json, const std::string& whence);
std::string fingerprint() const;
void sign(const SecretKey &);
bool checkSignature(const PublicKeys & publicKeys, const std::string & sig) const;
size_t checkSignatures(const PublicKeys & publicKeys) const;
StorePath getPath() const { return outPath; } StorePath getPath() const { return outPath; }
GENERATE_CMP(Realisation, me->id, me->outPath); GENERATE_CMP(Realisation, me->id, me->outPath);


@ -62,9 +62,15 @@ void write(const Store & store, Sink & out, const Realisation & realisation)
{ out << realisation.toJSON().dump(); } { out << realisation.toJSON().dump(); }
DrvOutput read(const Store & store, Source & from, Phantom<DrvOutput> _) DrvOutput read(const Store & store, Source & from, Phantom<DrvOutput> _)
{ return DrvOutput::parse(readString(from)); } {
return DrvOutput::parse(readString(from));
}
void write(const Store & store, Sink & out, const DrvOutput & drvOutput) void write(const Store & store, Sink & out, const DrvOutput & drvOutput)
{ out << drvOutput.to_string(); } {
out << drvOutput.to_string();
}
std::optional<StorePath> read(const Store & store, Source & from, Phantom<std::optional<StorePath>> _) std::optional<StorePath> read(const Store & store, Source & from, Phantom<std::optional<StorePath>> _)
{ {
@ -682,10 +688,12 @@ BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicD
conn->to << buildMode; conn->to << buildMode;
conn.processStderr(); conn.processStderr();
BuildResult res; BuildResult res;
unsigned int status; res.status = (BuildResult::Status) readInt(conn->from);
conn->from >> status >> res.errorMsg; conn->from >> res.errorMsg;
res.status = (BuildResult::Status) status; if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 29) {
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 0xc) { conn->from >> res.timesBuilt >> res.isNonDeterministic >> res.startTime >> res.stopTime;
}
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 28) {
auto builtOutputs = worker_proto::read(*this, conn->from, Phantom<DrvOutputs> {}); auto builtOutputs = worker_proto::read(*this, conn->from, Phantom<DrvOutputs> {});
res.builtOutputs = builtOutputs; res.builtOutputs = builtOutputs;
} }


@ -5,7 +5,7 @@ namespace nix {
#define SERVE_MAGIC_1 0x390c9deb #define SERVE_MAGIC_1 0x390c9deb
#define SERVE_MAGIC_2 0x5452eecb #define SERVE_MAGIC_2 0x5452eecb
#define SERVE_PROTOCOL_VERSION 0x206 #define SERVE_PROTOCOL_VERSION (2 << 8 | 6)
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00) #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff) #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)


@ -13,6 +13,7 @@ struct SSHStoreConfig : virtual RemoteStoreConfig
using RemoteStoreConfig::RemoteStoreConfig; using RemoteStoreConfig::RemoteStoreConfig;
const Setting<Path> sshKey{(StoreConfig*) this, "", "ssh-key", "path to an SSH private key"}; const Setting<Path> sshKey{(StoreConfig*) this, "", "ssh-key", "path to an SSH private key"};
const Setting<std::string> sshPublicHostKey{(StoreConfig*) this, "", "base64-ssh-public-host-key", "The public half of the host's SSH key"};
const Setting<bool> compress{(StoreConfig*) this, false, "compress", "whether to compress the connection"}; const Setting<bool> compress{(StoreConfig*) this, false, "compress", "whether to compress the connection"};
const Setting<Path> remoteProgram{(StoreConfig*) this, "nix-daemon", "remote-program", "path to the nix-daemon executable on the remote system"}; const Setting<Path> remoteProgram{(StoreConfig*) this, "nix-daemon", "remote-program", "path to the nix-daemon executable on the remote system"};
const Setting<std::string> remoteStore{(StoreConfig*) this, "", "remote-store", "URI of the store on the remote system"}; const Setting<std::string> remoteStore{(StoreConfig*) this, "", "remote-store", "URI of the store on the remote system"};
@ -34,6 +35,7 @@ public:
, master( , master(
host, host,
sshKey, sshKey,
sshPublicHostKey,
// Use SSH master only if using more than 1 connection. // Use SSH master only if using more than 1 connection.
connections->capacity() > 1, connections->capacity() > 1,
compress) compress)


@ -2,24 +2,37 @@
namespace nix { namespace nix {
SSHMaster::SSHMaster(const std::string & host, const std::string & keyFile, bool useMaster, bool compress, int logFD) SSHMaster::SSHMaster(const std::string & host, const std::string & keyFile, const std::string & sshPublicHostKey, bool useMaster, bool compress, int logFD)
: host(host) : host(host)
, fakeSSH(host == "localhost") , fakeSSH(host == "localhost")
, keyFile(keyFile) , keyFile(keyFile)
, sshPublicHostKey(sshPublicHostKey)
, useMaster(useMaster && !fakeSSH) , useMaster(useMaster && !fakeSSH)
, compress(compress) , compress(compress)
, logFD(logFD) , logFD(logFD)
{ {
if (host == "" || hasPrefix(host, "-")) if (host == "" || hasPrefix(host, "-"))
throw Error("invalid SSH host name '%s'", host); throw Error("invalid SSH host name '%s'", host);
auto state(state_.lock());
state->tmpDir = std::make_unique<AutoDelete>(createTempDir("", "nix", true, true, 0700));
} }
void SSHMaster::addCommonSSHOpts(Strings & args) void SSHMaster::addCommonSSHOpts(Strings & args)
{ {
auto state(state_.lock());
for (auto & i : tokenizeString<Strings>(getEnv("NIX_SSHOPTS").value_or(""))) for (auto & i : tokenizeString<Strings>(getEnv("NIX_SSHOPTS").value_or("")))
args.push_back(i); args.push_back(i);
if (!keyFile.empty()) if (!keyFile.empty())
args.insert(args.end(), {"-i", keyFile}); args.insert(args.end(), {"-i", keyFile});
if (!sshPublicHostKey.empty()) {
Path fileName = (Path) *state->tmpDir + "/host-key";
auto p = host.rfind("@");
string thost = p != string::npos ? string(host, p + 1) : host;
writeFile(fileName, thost + " " + base64Decode(sshPublicHostKey) + "\n");
args.insert(args.end(), {"-oUserKnownHostsFile=" + fileName});
}
if (compress) if (compress)
args.push_back("-C"); args.push_back("-C");
} }
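For reference, a small sketch of the known-hosts line this option produces: the setting holds the base64-encoded key, any "user@" prefix of the host is stripped, and the decoded "&lt;type&gt; &lt;key&gt;" pair is written next to the host name before being handed to ssh via -oUserKnownHostsFile (the values below are made up):

```cpp
#include <iostream>
#include <string>

int main()
{
    // Hypothetical inputs; the real code base64-decodes the configured setting.
    std::string host = "builder@example.org";
    std::string decodedHostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFakeFakeFakeFakeFakeFake";

    // Strip a leading "user@", mirroring the rfind("@") logic above.
    auto p = host.rfind('@');
    std::string thost = p != std::string::npos ? host.substr(p + 1) : host;

    // One OpenSSH known_hosts line: "<host> <key-type> <key>".
    std::cout << thost << " " << decodedHostKey << "\n";
}
```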
@ -87,7 +100,6 @@ Path SSHMaster::startMaster()
if (state->sshMaster != -1) return state->socketPath; if (state->sshMaster != -1) return state->socketPath;
state->tmpDir = std::make_unique<AutoDelete>(createTempDir("", "nix", true, true, 0700));
state->socketPath = (Path) *state->tmpDir + "/ssh.sock"; state->socketPath = (Path) *state->tmpDir + "/ssh.sock";


@ -12,6 +12,7 @@ private:
const std::string host; const std::string host;
bool fakeSSH; bool fakeSSH;
const std::string keyFile; const std::string keyFile;
const std::string sshPublicHostKey;
const bool useMaster; const bool useMaster;
const bool compress; const bool compress;
const int logFD; const int logFD;
@ -29,7 +30,7 @@ private:
public: public:
SSHMaster(const std::string & host, const std::string & keyFile, bool useMaster, bool compress, int logFD = -1); SSHMaster(const std::string & host, const std::string & keyFile, const std::string & sshPublicHostKey, bool useMaster, bool compress, int logFD = -1);
struct Connection struct Connection
{ {


@ -815,7 +815,7 @@ std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStor
auto pathsMap = copyPaths(srcStore, dstStore, storePaths, repair, checkSigs, substitute); auto pathsMap = copyPaths(srcStore, dstStore, storePaths, repair, checkSigs, substitute);
try { try {
for (auto & realisation : realisations) { for (auto & realisation : realisations) {
dstStore->registerDrvOutput(realisation); dstStore->registerDrvOutput(realisation, checkSigs);
} }
} catch (MissingExperimentalFeature & e) { } catch (MissingExperimentalFeature & e) {
// Don't fail if the remote doesn't support CA derivations as it might // Don't fail if the remote doesn't support CA derivations as it might


@ -380,7 +380,12 @@ public:
we don't really want to add the dependencies listed in a nar info we we don't really want to add the dependencies listed in a nar info we
don't trust anyways. don't trust anyways.
*/ */
virtual bool pathInfoIsTrusted(const ValidPathInfo &) virtual bool pathInfoIsUntrusted(const ValidPathInfo &)
{
return true;
}
virtual bool realisationIsUntrusted(const Realisation & )
{ {
return true; return true;
} }
@ -476,6 +481,8 @@ public:
*/ */
virtual void registerDrvOutput(const Realisation & output) virtual void registerDrvOutput(const Realisation & output)
{ unsupported("registerDrvOutput"); } { unsupported("registerDrvOutput"); }
virtual void registerDrvOutput(const Realisation & output, CheckSigsFlag checkSigs)
{ return registerDrvOutput(output); }
/* Write a NAR dump of a store path. */ /* Write a NAR dump of a store path. */
virtual void narFromPath(const StorePath & path, Sink & sink) = 0; virtual void narFromPath(const StorePath & path, Sink & sink) = 0;


@ -9,7 +9,7 @@ namespace nix {
#define WORKER_MAGIC_1 0x6e697863 #define WORKER_MAGIC_1 0x6e697863
#define WORKER_MAGIC_2 0x6478696f #define WORKER_MAGIC_2 0x6478696f
#define PROTOCOL_VERSION 0x11c #define PROTOCOL_VERSION (1 << 8 | 29)
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00) #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff) #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
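The version literal is now written as a shift-and-or so the major/minor split is explicit; a tiny sanity check of how the existing macros decompose it (the old hex literal 0x11c encoded minor 28):

```cpp
#include <cassert>

int main()
{
    constexpr unsigned protocolVersion = (1 << 8) | 29;      // same value as above
    auto getMajor = [](unsigned x) { return x & 0xff00; };
    auto getMinor = [](unsigned x) { return x & 0x00ff; };

    assert(getMajor(protocolVersion) == 0x100);   // major 1, kept in the high byte
    assert(getMinor(protocolVersion) == 29);      // minor bumped from 28 to 29
    assert(getMinor(0x11c) == 28);                // the previous literal, for comparison
}
```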


@ -20,7 +20,7 @@ bool Config::set(const std::string & name, const std::string & value)
return false; return false;
} }
i->second.setting->set(value, append); i->second.setting->set(value, append);
i->second.setting->overriden = true; i->second.setting->overridden = true;
return true; return true;
} }
@ -35,7 +35,7 @@ void Config::addSetting(AbstractSetting * setting)
auto i = unknownSettings.find(setting->name); auto i = unknownSettings.find(setting->name);
if (i != unknownSettings.end()) { if (i != unknownSettings.end()) {
setting->set(i->second); setting->set(i->second);
setting->overriden = true; setting->overridden = true;
unknownSettings.erase(i); unknownSettings.erase(i);
set = true; set = true;
} }
@ -48,7 +48,7 @@ void Config::addSetting(AbstractSetting * setting)
alias, setting->name); alias, setting->name);
else { else {
setting->set(i->second); setting->set(i->second);
setting->overriden = true; setting->overridden = true;
unknownSettings.erase(i); unknownSettings.erase(i);
set = true; set = true;
} }
@ -69,10 +69,10 @@ void AbstractConfig::reapplyUnknownSettings()
set(s.first, s.second); set(s.first, s.second);
} }
void Config::getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly) void Config::getSettings(std::map<std::string, SettingInfo> & res, bool overriddenOnly)
{ {
for (auto & opt : _settings) for (auto & opt : _settings)
if (!opt.second.isAlias && (!overridenOnly || opt.second.setting->overriden)) if (!opt.second.isAlias && (!overriddenOnly || opt.second.setting->overridden))
res.emplace(opt.first, SettingInfo{opt.second.setting->to_string(), opt.second.setting->description}); res.emplace(opt.first, SettingInfo{opt.second.setting->to_string(), opt.second.setting->description});
} }
@ -136,10 +136,10 @@ void AbstractConfig::applyConfigFile(const Path & path)
} catch (SysError &) { } } catch (SysError &) { }
} }
void Config::resetOverriden() void Config::resetOverridden()
{ {
for (auto & s : _settings) for (auto & s : _settings)
s.second.setting->overriden = false; s.second.setting->overridden = false;
} }
nlohmann::json Config::toJSON() nlohmann::json Config::toJSON()
@ -169,7 +169,7 @@ AbstractSetting::AbstractSetting(
void AbstractSetting::setDefault(const std::string & str) void AbstractSetting::setDefault(const std::string & str)
{ {
if (!overriden) set(str); if (!overridden) set(str);
} }
nlohmann::json AbstractSetting::toJSON() nlohmann::json AbstractSetting::toJSON()
@ -203,7 +203,7 @@ void BaseSetting<T>::convertToArg(Args & args, const std::string & category)
.description = fmt("Set the `%s` setting.", name), .description = fmt("Set the `%s` setting.", name),
.category = category, .category = category,
.labels = {"value"}, .labels = {"value"},
.handler = {[=](std::string s) { overriden = true; set(s); }}, .handler = {[=](std::string s) { overridden = true; set(s); }},
}); });
if (isAppendable()) if (isAppendable())
@ -212,7 +212,7 @@ void BaseSetting<T>::convertToArg(Args & args, const std::string & category)
.description = fmt("Append to the `%s` setting.", name), .description = fmt("Append to the `%s` setting.", name),
.category = category, .category = category,
.labels = {"value"}, .labels = {"value"},
.handler = {[=](std::string s) { overriden = true; set(s, true); }}, .handler = {[=](std::string s) { overridden = true; set(s, true); }},
}); });
} }
@ -365,16 +365,16 @@ bool GlobalConfig::set(const std::string & name, const std::string & value)
return false; return false;
} }
void GlobalConfig::getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly) void GlobalConfig::getSettings(std::map<std::string, SettingInfo> & res, bool overriddenOnly)
{ {
for (auto & config : *configRegistrations) for (auto & config : *configRegistrations)
config->getSettings(res, overridenOnly); config->getSettings(res, overriddenOnly);
} }
void GlobalConfig::resetOverriden() void GlobalConfig::resetOverridden()
{ {
for (auto & config : *configRegistrations) for (auto & config : *configRegistrations)
config->resetOverriden(); config->resetOverridden();
} }
nlohmann::json GlobalConfig::toJSON() nlohmann::json GlobalConfig::toJSON()


@ -71,9 +71,9 @@ public:
/** /**
* Adds the currently known settings to the given result map `res`. * Adds the currently known settings to the given result map `res`.
* - res: map to store settings in * - res: map to store settings in
* - overridenOnly: when set to true only overridden settings will be added to `res` * - overriddenOnly: when set to true only overridden settings will be added to `res`
*/ */
virtual void getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly = false) = 0; virtual void getSettings(std::map<std::string, SettingInfo> & res, bool overriddenOnly = false) = 0;
/** /**
* Parses the configuration in `contents` and applies it * Parses the configuration in `contents` and applies it
@ -91,7 +91,7 @@ public:
/** /**
* Resets the `overridden` flag of all Settings * Resets the `overridden` flag of all Settings
*/ */
virtual void resetOverriden() = 0; virtual void resetOverridden() = 0;
/** /**
* Outputs all settings to JSON * Outputs all settings to JSON
@ -127,7 +127,7 @@ public:
MyClass() : Config(readConfigFile("/etc/my-app.conf")) MyClass() : Config(readConfigFile("/etc/my-app.conf"))
{ {
std::cout << foo << "\n"; // will print 123 unless overriden std::cout << foo << "\n"; // will print 123 unless overridden
} }
}; };
*/ */
@ -163,9 +163,9 @@ public:
void addSetting(AbstractSetting * setting); void addSetting(AbstractSetting * setting);
void getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly = false) override; void getSettings(std::map<std::string, SettingInfo> & res, bool overriddenOnly = false) override;
void resetOverriden() override; void resetOverridden() override;
nlohmann::json toJSON() override; nlohmann::json toJSON() override;
@ -184,7 +184,7 @@ public:
int created = 123; int created = 123;
bool overriden = false; bool overridden = false;
void setDefault(const std::string & str); void setDefault(const std::string & str);
@ -215,7 +215,7 @@ protected:
virtual void convertToArg(Args & args, const std::string & category); virtual void convertToArg(Args & args, const std::string & category);
bool isOverriden() const { return overriden; } bool isOverridden() const { return overridden; }
}; };
/* A setting of type T. */ /* A setting of type T. */
@ -252,7 +252,7 @@ public:
virtual void override(const T & v) virtual void override(const T & v)
{ {
overriden = true; overridden = true;
value = v; value = v;
} }
@ -324,9 +324,9 @@ struct GlobalConfig : public AbstractConfig
bool set(const std::string & name, const std::string & value) override; bool set(const std::string & name, const std::string & value) override;
void getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly = false) override; void getSettings(std::map<std::string, SettingInfo> & res, bool overriddenOnly = false) override;
void resetOverriden() override; void resetOverridden() override;
nlohmann::json toJSON() override; nlohmann::json toJSON() override;


@ -29,20 +29,20 @@ namespace nix {
std::map<std::string, Config::SettingInfo> settings; std::map<std::string, Config::SettingInfo> settings;
Setting<std::string> foo{&config, value, "name-of-the-setting", "description"}; Setting<std::string> foo{&config, value, "name-of-the-setting", "description"};
config.getSettings(settings, /* overridenOnly = */ false); config.getSettings(settings, /* overriddenOnly = */ false);
const auto iter = settings.find("name-of-the-setting"); const auto iter = settings.find("name-of-the-setting");
ASSERT_NE(iter, settings.end()); ASSERT_NE(iter, settings.end());
ASSERT_EQ(iter->second.value, ""); ASSERT_EQ(iter->second.value, "");
ASSERT_EQ(iter->second.description, "description\n"); ASSERT_EQ(iter->second.description, "description\n");
} }
TEST(Config, getDefinedOverridenSettingNotSet) { TEST(Config, getDefinedOverriddenSettingNotSet) {
Config config; Config config;
std::string value; std::string value;
std::map<std::string, Config::SettingInfo> settings; std::map<std::string, Config::SettingInfo> settings;
Setting<std::string> foo{&config, value, "name-of-the-setting", "description"}; Setting<std::string> foo{&config, value, "name-of-the-setting", "description"};
config.getSettings(settings, /* overridenOnly = */ true); config.getSettings(settings, /* overriddenOnly = */ true);
const auto e = settings.find("name-of-the-setting"); const auto e = settings.find("name-of-the-setting");
ASSERT_EQ(e, settings.end()); ASSERT_EQ(e, settings.end());
} }
@ -55,7 +55,7 @@ namespace nix {
setting.assign("value"); setting.assign("value");
config.getSettings(settings, /* overridenOnly = */ false); config.getSettings(settings, /* overriddenOnly = */ false);
const auto iter = settings.find("name-of-the-setting"); const auto iter = settings.find("name-of-the-setting");
ASSERT_NE(iter, settings.end()); ASSERT_NE(iter, settings.end());
ASSERT_EQ(iter->second.value, "value"); ASSERT_EQ(iter->second.value, "value");
@ -69,7 +69,7 @@ namespace nix {
ASSERT_TRUE(config.set("name-of-the-setting", "value")); ASSERT_TRUE(config.set("name-of-the-setting", "value"));
config.getSettings(settings, /* overridenOnly = */ false); config.getSettings(settings, /* overriddenOnly = */ false);
const auto e = settings.find("name-of-the-setting"); const auto e = settings.find("name-of-the-setting");
ASSERT_NE(e, settings.end()); ASSERT_NE(e, settings.end());
ASSERT_EQ(e->second.value, "value"); ASSERT_EQ(e->second.value, "value");
@ -100,7 +100,7 @@ namespace nix {
{ {
std::map<std::string, Config::SettingInfo> settings; std::map<std::string, Config::SettingInfo> settings;
config.getSettings(settings, /* overridenOnly = */ false); config.getSettings(settings, /* overriddenOnly = */ false);
ASSERT_EQ(settings.find("key"), settings.end()); ASSERT_EQ(settings.find("key"), settings.end());
} }
@ -108,17 +108,17 @@ namespace nix {
{ {
std::map<std::string, Config::SettingInfo> settings; std::map<std::string, Config::SettingInfo> settings;
config.getSettings(settings, /* overridenOnly = */ false); config.getSettings(settings, /* overriddenOnly = */ false);
ASSERT_EQ(settings["key"].value, "value"); ASSERT_EQ(settings["key"].value, "value");
} }
} }
TEST(Config, resetOverriden) { TEST(Config, resetOverridden) {
Config config; Config config;
config.resetOverriden(); config.resetOverridden();
} }
TEST(Config, resetOverridenWithSetting) { TEST(Config, resetOverriddenWithSetting) {
Config config; Config config;
Setting<std::string> setting{&config, "", "name-of-the-setting", "description"}; Setting<std::string> setting{&config, "", "name-of-the-setting", "description"};
@ -127,7 +127,7 @@ namespace nix {
setting.set("foo"); setting.set("foo");
ASSERT_EQ(setting.get(), "foo"); ASSERT_EQ(setting.get(), "foo");
config.getSettings(settings, /* overridenOnly = */ true); config.getSettings(settings, /* overriddenOnly = */ true);
ASSERT_TRUE(settings.empty()); ASSERT_TRUE(settings.empty());
} }
@ -135,18 +135,18 @@ namespace nix {
std::map<std::string, Config::SettingInfo> settings; std::map<std::string, Config::SettingInfo> settings;
setting.override("bar"); setting.override("bar");
ASSERT_TRUE(setting.overriden); ASSERT_TRUE(setting.overridden);
ASSERT_EQ(setting.get(), "bar"); ASSERT_EQ(setting.get(), "bar");
config.getSettings(settings, /* overridenOnly = */ true); config.getSettings(settings, /* overriddenOnly = */ true);
ASSERT_FALSE(settings.empty()); ASSERT_FALSE(settings.empty());
} }
{ {
std::map<std::string, Config::SettingInfo> settings; std::map<std::string, Config::SettingInfo> settings;
config.resetOverriden(); config.resetOverridden();
ASSERT_FALSE(setting.overriden); ASSERT_FALSE(setting.overridden);
config.getSettings(settings, /* overridenOnly = */ true); config.getSettings(settings, /* overriddenOnly = */ true);
ASSERT_TRUE(settings.empty()); ASSERT_TRUE(settings.empty());
} }
} }


@ -117,6 +117,24 @@ namespace nix {
ASSERT_EQ(parsed, expected); ASSERT_EQ(parsed, expected);
} }
TEST(parseURL, parseScopedRFC4007IPv6Address) {
auto s = "http://[fe80::818c:da4d:8975:415c\%enp0s25]:8080";
auto parsed = parseURL(s);
ParsedURL expected {
.url = "http://[fe80::818c:da4d:8975:415c\%enp0s25]:8080",
.base = "http://[fe80::818c:da4d:8975:415c\%enp0s25]:8080",
.scheme = "http",
.authority = "[fe80::818c:da4d:8975:415c\%enp0s25]:8080",
.path = "",
.query = (StringMap) { },
.fragment = "",
};
ASSERT_EQ(parsed, expected);
}
TEST(parseURL, parseIPv6Address) { TEST(parseURL, parseIPv6Address) {
auto s = "http://[2a02:8071:8192:c100:311d:192d:81ac:11ea]:8080"; auto s = "http://[2a02:8071:8192:c100:311d:192d:81ac:11ea]:8080";
auto parsed = parseURL(s); auto parsed = parseURL(s);


@ -8,7 +8,7 @@ namespace nix {
// URI stuff. // URI stuff.
const static std::string pctEncoded = "(?:%[0-9a-fA-F][0-9a-fA-F])"; const static std::string pctEncoded = "(?:%[0-9a-fA-F][0-9a-fA-F])";
const static std::string schemeRegex = "(?:[a-z][a-z0-9+.-]*)"; const static std::string schemeRegex = "(?:[a-z][a-z0-9+.-]*)";
const static std::string ipv6AddressSegmentRegex = "[0-9a-fA-F:]+"; const static std::string ipv6AddressSegmentRegex = "[0-9a-fA-F:]+(?:%\\w+)?";
const static std::string ipv6AddressRegex = "(?:\\[" + ipv6AddressSegmentRegex + "\\]|" + ipv6AddressSegmentRegex + ")"; const static std::string ipv6AddressRegex = "(?:\\[" + ipv6AddressSegmentRegex + "\\]|" + ipv6AddressSegmentRegex + ")";
const static std::string unreservedRegex = "(?:[a-zA-Z0-9-._~])"; const static std::string unreservedRegex = "(?:[a-zA-Z0-9-._~])";
const static std::string subdelimsRegex = "(?:[!$&'\"()*+,;=])"; const static std::string subdelimsRegex = "(?:[!$&'\"()*+,;=])";
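A quick standalone check that the widened segment pattern accepts an RFC 4007 zone identifier after '%' while still matching plain addresses, built from the same fragments as above (here tested with std::regex rather than the library's own parser):

```cpp
#include <iostream>
#include <regex>
#include <string>

int main()
{
    const std::string ipv6AddressSegmentRegex = "[0-9a-fA-F:]+(?:%\\w+)?";
    const std::string ipv6AddressRegex =
        "(?:\\[" + ipv6AddressSegmentRegex + "\\]|" + ipv6AddressSegmentRegex + ")";

    std::regex re(ipv6AddressRegex);
    for (std::string s : {"[fe80::818c:da4d:8975:415c%enp0s25]",
                          "[2a02:8071:8192:c100:311d:192d:81ac:11ea]"})
        std::cout << s << (std::regex_match(s, re) ? " matches" : " does not match") << "\n";
}
```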


@ -1590,7 +1590,7 @@ void startSignalHandlerThread()
updateWindowSize(); updateWindowSize();
if (sigprocmask(SIG_BLOCK, nullptr, &savedSignalMask)) if (sigprocmask(SIG_BLOCK, nullptr, &savedSignalMask))
throw SysError("quering signal mask"); throw SysError("querying signal mask");
sigset_t set; sigset_t set;
sigemptyset(&set); sigemptyset(&set);


@ -447,6 +447,7 @@ static void main_nix_build(int argc, char * * argv)
"unset NIX_ENFORCE_PURITY; " "unset NIX_ENFORCE_PURITY; "
"shopt -u nullglob; " "shopt -u nullglob; "
"unset TZ; %6%" "unset TZ; %6%"
"shopt -s execfail;"
"%7%", "%7%",
shellEscape(tmpDir), shellEscape(tmpDir),
(pure ? "" : "p=$PATH; "), (pure ? "" : "p=$PATH; "),


@ -911,7 +911,7 @@ static void opServe(Strings opFlags, Strings opArgs)
if (GET_PROTOCOL_MINOR(clientVersion) >= 3) if (GET_PROTOCOL_MINOR(clientVersion) >= 3)
out << status.timesBuilt << status.isNonDeterministic << status.startTime << status.stopTime; out << status.timesBuilt << status.isNonDeterministic << status.startTime << status.stopTime;
if (GET_PROTOCOL_MINOR(clientVersion >= 5)) { if (GET_PROTOCOL_MINOR(clientVersion >= 6)) {
worker_proto::write(*store, out, status.builtOutputs); worker_proto::write(*store, out, status.builtOutputs);
} }


@ -81,7 +81,7 @@ path installables are substituted.
Unless `--no-link` is specified, after a successful build, it creates Unless `--no-link` is specified, after a successful build, it creates
symlinks to the store paths of the installables. These symlinks have symlinks to the store paths of the installables. These symlinks have
the prefix `./result` by default; this can be overriden using the the prefix `./result` by default; this can be overridden using the
`--out-link` option. Each symlink has a suffix `-<N>-<outname>`, where `--out-link` option. Each symlink has a suffix `-<N>-<outname>`, where
*N* is the index of the installable (with the left-most installable *N* is the index of the installable (with the left-most installable
having index 0), and *outname* is the symbolic derivation output name having index 0), and *outname* is the symbolic derivation output name


@ -24,7 +24,7 @@ R""(
This command creates a flake in the current directory by copying the This command creates a flake in the current directory by copying the
files of a template. It will not overwrite existing files. The default files of a template. It will not overwrite existing files. The default
template is `templates#defaultTemplate`, but this can be overriden template is `templates#defaultTemplate`, but this can be overridden
using `-t`. using `-t`.
# Template definitions # Template definitions


@ -1,23 +0,0 @@
R""(
# Examples
* Show the inputs of the `hydra` flake:
```console
# nix flake list-inputs github:NixOS/hydra
github:NixOS/hydra/bde8d81876dfc02143e5070e42c78d8f0d83d6f7
├───nix: github:NixOS/nix/79aa7d95183cbe6c0d786965f0dbff414fd1aa67
│ ├───lowdown-src: github:kristapsdz/lowdown/1705b4a26fbf065d9574dce47a94e8c7c79e052f
│ └───nixpkgs: github:NixOS/nixpkgs/ad0d20345219790533ebe06571f82ed6b034db31
└───nixpkgs follows input 'nix/nixpkgs'
```
# Description
This command shows the inputs of the flake specified by the flake
reference *flake-url*. Since it prints the locked inputs that result
from generating or updating the lock file, this command essentially
displays the contents of the flake's lock file in human-readable form.
)""


@ -5,19 +5,24 @@ R""(
* Show what `nixpkgs` resolves to: * Show what `nixpkgs` resolves to:
```console ```console
# nix flake info nixpkgs # nix flake metadata nixpkgs
Resolved URL: github:NixOS/nixpkgs Resolved URL: github:edolstra/dwarffs
Locked URL: github:NixOS/nixpkgs/b67ba0bfcc714453cdeb8d713e35751eb8b4c8f4 Locked URL: github:edolstra/dwarffs/f691e2c991e75edb22836f1dbe632c40324215c5
Description: A collection of packages for the Nix package manager Description: A filesystem that fetches DWARF debug info from the Internet on demand
Path: /nix/store/23qapccs6cfmwwrlq8kr41vz5vdmns3r-source Path: /nix/store/769s05vjydmc2lcf6b02az28wsa9ixh1-source
Revision: b67ba0bfcc714453cdeb8d713e35751eb8b4c8f4 Revision: f691e2c991e75edb22836f1dbe632c40324215c5
Last modified: 2020-12-23 12:36:12 Last modified: 2021-01-21 15:41:26
Inputs:
├───nix: github:NixOS/nix/6254b1f5d298ff73127d7b0f0da48f142bdc753c
│ ├───lowdown-src: github:kristapsdz/lowdown/1705b4a26fbf065d9574dce47a94e8c7c79e052f
│ └───nixpkgs: github:NixOS/nixpkgs/ad0d20345219790533ebe06571f82ed6b034db31
└───nixpkgs follows input 'nix/nixpkgs'
``` ```
* Show information about `dwarffs` in JSON format: * Show information about `dwarffs` in JSON format:
```console ```console
# nix flake info dwarffs --json | jq . # nix flake metadata dwarffs --json | jq .
{ {
"description": "A filesystem that fetches DWARF debug info from the Internet on demand", "description": "A filesystem that fetches DWARF debug info from the Internet on demand",
"lastModified": 1597153508, "lastModified": 1597153508,
@ -29,6 +34,7 @@ R""(
"rev": "d181d714fd36eb06f4992a1997cd5601e26db8f5", "rev": "d181d714fd36eb06f4992a1997cd5601e26db8f5",
"type": "github" "type": "github"
}, },
"locks": { ... },
"original": { "original": {
"id": "dwarffs", "id": "dwarffs",
"type": "indirect" "type": "indirect"
@ -75,6 +81,9 @@ data. This includes:
time of the commit of the locked flake; for tarball flakes, it's the time of the commit of the locked flake; for tarball flakes, it's the
most recent timestamp of any file inside the tarball. most recent timestamp of any file inside the tarball.
* `Inputs`: The flake inputs with their corresponding lock file
entries.
With `--json`, the output is a JSON object with the following fields: With `--json`, the output is a JSON object with the following fields:
* `original` and `originalUrl`: The flake reference specified by the * `original` and `originalUrl`: The flake reference specified by the
@ -96,4 +105,6 @@ With `--json`, the output is a JSON object with the following fields:
* `lastModified`: See `Last modified` above. * `lastModified`: See `Last modified` above.
* `locks`: The contents of `flake.lock`.
)"" )""


@ -43,12 +43,6 @@ public:
return parseFlakeRef(flakeUrl, absPath(".")); //FIXME return parseFlakeRef(flakeUrl, absPath(".")); //FIXME
} }
Flake getFlake()
{
auto evalState = getEvalState();
return flake::getFlake(*evalState, getFlakeRef(), lockFlags.useRegistries);
}
LockedFlake lockFlake() LockedFlake lockFlake()
{ {
return flake::lockFlake(*getEvalState(), getFlakeRef(), lockFlags); return flake::lockFlake(*getEvalState(), getFlakeRef(), lockFlags);
@ -60,43 +54,6 @@ public:
} }
}; };
static void printFlakeInfo(const Store & store, const Flake & flake)
{
logger->cout("Resolved URL: %s", flake.resolvedRef.to_string());
logger->cout("Locked URL: %s", flake.lockedRef.to_string());
if (flake.description)
logger->cout("Description: %s", *flake.description);
logger->cout("Path: %s", store.printStorePath(flake.sourceInfo->storePath));
if (auto rev = flake.lockedRef.input.getRev())
logger->cout("Revision: %s", rev->to_string(Base16, false));
if (auto revCount = flake.lockedRef.input.getRevCount())
logger->cout("Revisions: %s", *revCount);
if (auto lastModified = flake.lockedRef.input.getLastModified())
logger->cout("Last modified: %s",
std::put_time(std::localtime(&*lastModified), "%F %T"));
}
static nlohmann::json flakeToJSON(const Store & store, const Flake & flake)
{
nlohmann::json j;
if (flake.description)
j["description"] = *flake.description;
j["originalUrl"] = flake.originalRef.to_string();
j["original"] = fetchers::attrsToJSON(flake.originalRef.toAttrs());
j["resolvedUrl"] = flake.resolvedRef.to_string();
j["resolved"] = fetchers::attrsToJSON(flake.resolvedRef.toAttrs());
j["url"] = flake.lockedRef.to_string(); // FIXME: rename to lockedUrl
j["locked"] = fetchers::attrsToJSON(flake.lockedRef.toAttrs());
if (auto rev = flake.lockedRef.input.getRev())
j["revision"] = rev->to_string(Base16, false);
if (auto revCount = flake.lockedRef.input.getRevCount())
j["revCount"] = *revCount;
if (auto lastModified = flake.lockedRef.input.getLastModified())
j["lastModified"] = *lastModified;
j["path"] = store.printStorePath(flake.sourceInfo->storePath);
return j;
}
struct CmdFlakeUpdate : FlakeCommand struct CmdFlakeUpdate : FlakeCommand
{ {
std::string description() override std::string description() override
@ -110,6 +67,7 @@ struct CmdFlakeUpdate : FlakeCommand
removeFlag("recreate-lock-file"); removeFlag("recreate-lock-file");
removeFlag("update-input"); removeFlag("update-input");
removeFlag("no-update-lock-file"); removeFlag("no-update-lock-file");
removeFlag("no-write-lock-file");
} }
std::string doc() override std::string doc() override
@ -124,6 +82,7 @@ struct CmdFlakeUpdate : FlakeCommand
settings.tarballTtl = 0; settings.tarballTtl = 0;
lockFlags.recreateLockFile = true; lockFlags.recreateLockFile = true;
lockFlags.writeLockFile = true;
lockFlake(); lockFlake();
} }
@ -136,6 +95,12 @@ struct CmdFlakeLock : FlakeCommand
return "create missing lock file entries"; return "create missing lock file entries";
} }
CmdFlakeLock()
{
/* Remove flags that don't make sense. */
removeFlag("no-write-lock-file");
}
std::string doc() override std::string doc() override
{ {
return return
@ -147,6 +112,8 @@ struct CmdFlakeLock : FlakeCommand
{ {
settings.tarballTtl = 0; settings.tarballTtl = 0;
lockFlags.writeLockFile = true;
lockFlake(); lockFlake();
} }
}; };
@ -165,54 +132,72 @@ static void enumerateOutputs(EvalState & state, Value & vFlake,
callback(attr.name, *attr.value, *attr.pos); callback(attr.name, *attr.value, *attr.pos);
} }
struct CmdFlakeInfo : FlakeCommand, MixJSON struct CmdFlakeMetadata : FlakeCommand, MixJSON
{ {
std::string description() override std::string description() override
{ {
return "list info about a given flake"; return "show flake metadata";
} }
std::string doc() override std::string doc() override
{ {
return return
#include "flake-info.md" #include "flake-metadata.md"
; ;
} }
void run(nix::ref<nix::Store> store) override void run(nix::ref<nix::Store> store) override
{ {
auto flake = getFlake(); auto lockedFlake = lockFlake();
auto & flake = lockedFlake.flake;
if (json) { if (json) {
auto json = flakeToJSON(*store, flake); nlohmann::json j;
logger->cout("%s", json.dump()); if (flake.description)
} else j["description"] = *flake.description;
printFlakeInfo(*store, flake); j["originalUrl"] = flake.originalRef.to_string();
} j["original"] = fetchers::attrsToJSON(flake.originalRef.toAttrs());
}; j["resolvedUrl"] = flake.resolvedRef.to_string();
j["resolved"] = fetchers::attrsToJSON(flake.resolvedRef.toAttrs());
j["url"] = flake.lockedRef.to_string(); // FIXME: rename to lockedUrl
j["locked"] = fetchers::attrsToJSON(flake.lockedRef.toAttrs());
if (auto rev = flake.lockedRef.input.getRev())
j["revision"] = rev->to_string(Base16, false);
if (auto revCount = flake.lockedRef.input.getRevCount())
j["revCount"] = *revCount;
if (auto lastModified = flake.lockedRef.input.getLastModified())
j["lastModified"] = *lastModified;
j["path"] = store->printStorePath(flake.sourceInfo->storePath);
j["locks"] = lockedFlake.lockFile.toJSON();
logger->cout("%s", j.dump());
} else {
logger->cout(
ANSI_BOLD "Resolved URL:" ANSI_NORMAL " %s",
flake.resolvedRef.to_string());
logger->cout(
ANSI_BOLD "Locked URL:" ANSI_NORMAL " %s",
flake.lockedRef.to_string());
if (flake.description)
logger->cout(
ANSI_BOLD "Description:" ANSI_NORMAL " %s",
*flake.description);
logger->cout(
ANSI_BOLD "Path:" ANSI_NORMAL " %s",
store->printStorePath(flake.sourceInfo->storePath));
if (auto rev = flake.lockedRef.input.getRev())
logger->cout(
ANSI_BOLD "Revision:" ANSI_NORMAL " %s",
rev->to_string(Base16, false));
if (auto revCount = flake.lockedRef.input.getRevCount())
logger->cout(
ANSI_BOLD "Revisions:" ANSI_NORMAL " %s",
*revCount);
if (auto lastModified = flake.lockedRef.input.getLastModified())
logger->cout(
ANSI_BOLD "Last modified:" ANSI_NORMAL " %s",
std::put_time(std::localtime(&*lastModified), "%F %T"));
struct CmdFlakeListInputs : FlakeCommand, MixJSON logger->cout(ANSI_BOLD "Inputs:" ANSI_NORMAL);
{
std::string description() override
{
return "list flake inputs";
}
std::string doc() override
{
return
#include "flake-list-inputs.md"
;
}
void run(nix::ref<nix::Store> store) override
{
auto flake = lockFlake();
if (json)
logger->cout("%s", flake.lockFile.toJSON());
else {
logger->cout("%s", flake.flake.lockedRef);
std::unordered_set<std::shared_ptr<Node>> visited; std::unordered_set<std::shared_ptr<Node>> visited;
@ -226,7 +211,7 @@ struct CmdFlakeListInputs : FlakeCommand, MixJSON
if (auto lockedNode = std::get_if<0>(&input.second)) { if (auto lockedNode = std::get_if<0>(&input.second)) {
logger->cout("%s" ANSI_BOLD "%s" ANSI_NORMAL ": %s", logger->cout("%s" ANSI_BOLD "%s" ANSI_NORMAL ": %s",
prefix + (last ? treeLast : treeConn), input.first, prefix + (last ? treeLast : treeConn), input.first,
*lockedNode ? (*lockedNode)->lockedRef : flake.flake.lockedRef); *lockedNode ? (*lockedNode)->lockedRef : flake.lockedRef);
bool firstVisit = visited.insert(*lockedNode).second; bool firstVisit = visited.insert(*lockedNode).second;
@ -239,12 +224,21 @@ struct CmdFlakeListInputs : FlakeCommand, MixJSON
} }
}; };
visited.insert(flake.lockFile.root); visited.insert(lockedFlake.lockFile.root);
recurse(*flake.lockFile.root, ""); recurse(*lockedFlake.lockFile.root, "");
} }
} }
}; };
struct CmdFlakeInfo : CmdFlakeMetadata
{
void run(nix::ref<nix::Store> store) override
{
warn("'nix flake info' is a deprecated alias for 'nix flake metadata'");
CmdFlakeMetadata::run(store);
}
};
struct CmdFlakeCheck : FlakeCommand
{
bool build = true;
@@ -1038,8 +1032,8 @@ struct CmdFlake : NixMultiCommand
: MultiCommand({
{"update", []() { return make_ref<CmdFlakeUpdate>(); }},
{"lock", []() { return make_ref<CmdFlakeLock>(); }},
+{"metadata", []() { return make_ref<CmdFlakeMetadata>(); }},
{"info", []() { return make_ref<CmdFlakeInfo>(); }},
-{"list-inputs", []() { return make_ref<CmdFlakeListInputs>(); }},
{"check", []() { return make_ref<CmdFlakeCheck>(); }},
{"init", []() { return make_ref<CmdFlakeInit>(); }},
{"new", []() { return make_ref<CmdFlakeNew>(); }},


@@ -70,7 +70,7 @@ Here are some examples of flake references in their URL-like representation:
* `/home/alice/src/patchelf`: A flake in some other directory.
* `nixpkgs`: The `nixpkgs` entry in the flake registry.
* `nixpkgs/a3a3dda3bacf61e8a39258a0ed9c924eeca8e293`: The `nixpkgs`
-  entry in the flake registry, with its Git revision overriden to a
+  entry in the flake registry, with its Git revision overridden to a
  specific value.
* `github:NixOS/nixpkgs`: The `master` branch of the `NixOS/nixpkgs`
  repository on GitHub.
@@ -377,7 +377,7 @@ outputs = { self, nixpkgs, grcov }: {
};
```
-Transitive inputs can be overriden from a `flake.nix` file. For
+Transitive inputs can be overridden from a `flake.nix` file. For
example, the following overrides the `nixpkgs` input of the `nixops`
input:
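The hunk ends before flake.md's own example. As a hedged sketch (not the file's actual example), an override of this kind is normally written with `follows`, assuming the flake declares both a `nixpkgs` and a `nixops` input:

```console
$ cat flake.nix
{
  inputs.nixpkgs.url = "github:NixOS/nixpkgs";
  inputs.nixops.url = "github:NixOS/nixops";
  # reuse this flake's own nixpkgs for the nixops input
  inputs.nixops.inputs.nixpkgs.follows = "nixpkgs";
  outputs = { self, nixpkgs, nixops }: { };
}
```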


@@ -309,13 +309,13 @@ void mainWrapped(int argc, char * * argv)
if (!args.useNet) {
// FIXME: should check for command line overrides only.
-if (!settings.useSubstitutes.overriden)
+if (!settings.useSubstitutes.overridden)
settings.useSubstitutes = false;
-if (!settings.tarballTtl.overriden)
+if (!settings.tarballTtl.overridden)
settings.tarballTtl = std::numeric_limits<unsigned int>::max();
-if (!fileTransferSettings.tries.overriden)
+if (!fileTransferSettings.tries.overridden)
fileTransferSettings.tries = 0;
-if (!fileTransferSettings.connectTimeout.overriden)
+if (!fileTransferSettings.connectTimeout.overridden)
fileTransferSettings.connectTimeout = 1;
}

src/nix/realisation.cc (new file, 78 lines)

@@ -0,0 +1,78 @@
#include "command.hh"
#include "common-args.hh"
#include <nlohmann/json.hpp>
using namespace nix;
struct CmdRealisation : virtual NixMultiCommand
{
CmdRealisation() : MultiCommand(RegisterCommand::getCommandsFor({"realisation"}))
{ }
std::string description() override
{
return "manipulate a Nix realisation";
}
Category category() override { return catUtility; }
void run() override
{
if (!command)
throw UsageError("'nix realisation' requires a sub-command.");
command->second->prepare();
command->second->run();
}
};
static auto rCmdRealisation = registerCommand<CmdRealisation>("realisation");
struct CmdRealisationInfo : RealisedPathsCommand, MixJSON
{
std::string description() override
{
return "query information about one or several realisations";
}
std::string doc() override
{
return
#include "realisation/info.md"
;
}
Category category() override { return catSecondary; }
void run(ref<Store> store, std::vector<RealisedPath> paths) override
{
settings.requireExperimentalFeature("ca-derivations");
if (json) {
nlohmann::json res = nlohmann::json::array();
for (auto & path : paths) {
nlohmann::json currentPath;
if (auto realisation = std::get_if<Realisation>(&path.raw))
currentPath = realisation->toJSON();
else
currentPath["opaquePath"] = store->printStorePath(path.path());
res.push_back(currentPath);
}
std::cout << res.dump();
}
else {
for (auto & path : paths) {
if (auto realisation = std::get_if<Realisation>(&path.raw)) {
std::cout <<
realisation->id.to_string() << " " <<
store->printStorePath(realisation->outPath);
} else
std::cout << store->printStorePath(path.path());
std::cout << std::endl;
}
}
}
};
static auto rCmdRealisationInfo = registerCommand2<CmdRealisationInfo>({"realisation", "info"});
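Without `--json`, the loop above prints one realisation per line: the realisation id, a space, and the full store path of the output. A hedged example reusing the hash from the documentation below, and assuming the `ca-derivations` experimental feature is enabled as the code requires:

```console
$ nix realisation info nixpkgs#hello
sha256:3d382378a00588e064ee30be96dd0fa7e7df7cf3fbcace85a0e7b7dada1eef25!out /nix/store/fd3m7xawvrqcg98kgz5hc2vk3x9q0lh7-hello
```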

src/nix/realisation/info.md (new file, 15 lines)

@@ -0,0 +1,15 @@
R"MdBoundary(
# Description
Display some information about the given realisation
# Examples
Show some information about the realisation of the `hello` package:
```console
$ nix realisation info nixpkgs#hello --json
[{"id":"sha256:3d382378a00588e064ee30be96dd0fa7e7df7cf3fbcace85a0e7b7dada1eef25!out","outPath":"fd3m7xawvrqcg98kgz5hc2vk3x9q0lh7-hello"}]
```
)MdBoundary"


@@ -27,6 +27,6 @@ the resulting store path and the cryptographic hash of the contents of
the file.
The name component of the store path defaults to the last component of
-*url*, but this can be overriden using `--name`.
+*url*, but this can be overridden using `--name`.
)""

tests/build-hook-ca-fixed.nix (new file, 56 lines)

@@ -0,0 +1,56 @@
{ busybox }:
with import ./config.nix;
let
mkDerivation = args:
derivation ({
inherit system;
builder = busybox;
args = ["sh" "-e" args.builder or (builtins.toFile "builder-${args.name}.sh" "if [ -e .attrs.sh ]; then source .attrs.sh; fi; eval \"$buildCommand\"")];
outputHashMode = "recursive";
outputHashAlgo = "sha256";
} // removeAttrs args ["builder" "meta"])
// { meta = args.meta or {}; };
input1 = mkDerivation {
shell = busybox;
name = "build-remote-input-1";
buildCommand = "echo FOO > $out";
requiredSystemFeatures = ["foo"];
outputHash = "sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc=";
};
input2 = mkDerivation {
shell = busybox;
name = "build-remote-input-2";
buildCommand = "echo BAR > $out";
requiredSystemFeatures = ["bar"];
outputHash = "sha256-XArauVH91AVwP9hBBQNlkX9ccuPpSYx9o0zeIHb6e+Q=";
};
input3 = mkDerivation {
shell = busybox;
name = "build-remote-input-3";
buildCommand = ''
read x < ${input2}
echo $x BAZ > $out
'';
requiredSystemFeatures = ["baz"];
outputHash = "sha256-daKAcPp/+BYMQsVi/YYMlCKoNAxCNDsaivwSHgQqD2s=";
};
in
mkDerivation {
shell = busybox;
name = "build-remote";
buildCommand =
''
read x < ${input1}
read y < ${input3}
echo "$x $y" > $out
'';
outputHash = "sha256-5SxbkUw6xe2l9TE1uwCvTtTDysD1vhRor38OtDF0LqQ=";
}
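Each of these fixture derivations carries a distinct `requiredSystemFeatures` value (`foo`, `bar`, `baz`), which is what lets the remote-build test spread them across builders that advertise different features. The actual harness invocation lives in the shared build-remote.sh driver, which is not part of this diff; a purely illustrative sketch of a matching `--builders` specification, with made-up builder URIs, would be:

```console
$ nix build -L --no-link -f build-hook-ca-fixed.nix \
    --builders 'ssh://builder1 x86_64-linux - 1 1 foo ; ssh://builder2 x86_64-linux - 1 1 bar ; ssh://builder3 x86_64-linux - 1 1 baz'
```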

tests/build-remote-content-addressed-fixed.sh (new file, 5 lines)

@@ -0,0 +1,5 @@
source common.sh
file=build-hook-ca-fixed.nix
source build-remote.sh


@@ -1,6 +1,6 @@
source common.sh
-file=build-hook-ca.nix
+file=build-hook-ca-floating.nix
sed -i 's/experimental-features .*/& ca-derivations/' "$NIX_CONF_DIR"/nix.conf


@@ -61,7 +61,9 @@ testNixCommand () {
# Disabled until we have it properly working
# testRemoteCache
+clearStore
testDeterministicCA
+clearStore
testCutoff
testGC
testNixCommand

tests/ca/common.sh (new file, 1 line)

@@ -0,0 +1 @@
source ../common.sh


@@ -1,4 +1,4 @@
-with import ./config.nix;
+with import ../config.nix;
{ seed ? 0 }:
# A simple content-addressed derivation.

tests/ca/signatures.sh (new file, 39 lines)

@@ -0,0 +1,39 @@
source common.sh
# Globally enable the ca derivations experimental flag
sed -i 's/experimental-features = .*/& ca-derivations ca-references/' "$NIX_CONF_DIR/nix.conf"
clearStore
clearCache
nix-store --generate-binary-cache-key cache1.example.org $TEST_ROOT/sk1 $TEST_ROOT/pk1
pk1=$(cat $TEST_ROOT/pk1)
export REMOTE_STORE_DIR="$TEST_ROOT/remote_store"
export REMOTE_STORE="file://$REMOTE_STORE_DIR"
ensureCorrectlyCopied () {
attrPath="$1"
nix build --store "$REMOTE_STORE" --file ./content-addressed.nix "$attrPath"
}
testOneCopy () {
clearStore
rm -rf "$REMOTE_STORE_DIR"
attrPath="$1"
nix copy --to $REMOTE_STORE "$attrPath" --file ./content-addressed.nix \
--secret-key-files "$TEST_ROOT/sk1"
ensureCorrectlyCopied "$attrPath"
# Ensure that we can copy back what we put in the store
clearStore
nix copy --from $REMOTE_STORE \
--file ./content-addressed.nix "$attrPath" \
--trusted-public-keys $pk1
}
for attrPath in rootCA dependentCA transitivelyDependentCA dependentNonCA dependentFixedOutput; do
testOneCopy "$attrPath"
done

tests/ca/substitute.sh (new file, 24 lines)

@@ -0,0 +1,24 @@
#!/usr/bin/env bash
# Ensure that binary substitution works properly with ca derivations
source common.sh
sed -i 's/experimental-features .*/& ca-derivations ca-references/' "$NIX_CONF_DIR"/nix.conf
rm -rf $TEST_ROOT/binary_cache
export REMOTE_STORE=file://$TEST_ROOT/binary_cache
buildDrvs () {
nix build --file ./content-addressed.nix -L --no-link "$@"
}
# Populate the remote cache
clearStore
buildDrvs --post-build-hook ../push-to-store.sh
# Restart the build on an empty store, ensuring that nothing gets built locally
clearStore
buildDrvs --substitute --substituters $REMOTE_STORE --no-require-sigs -j0


@@ -11,7 +11,7 @@ export NIX_LOCALSTATE_DIR=$TEST_ROOT/var
export NIX_LOG_DIR=$TEST_ROOT/var/log/nix
export NIX_STATE_DIR=$TEST_ROOT/var/nix
export NIX_CONF_DIR=$TEST_ROOT/etc
-export NIX_DAEMON_SOCKET_PATH=$TEST_ROOT/daemon-socket
+export NIX_DAEMON_SOCKET_PATH=$TEST_ROOT/dSocket
unset NIX_USER_CONF_FILES
export _NIX_TEST_SHARED=$TEST_ROOT/shared
if [[ -n $NIX_STORE ]]; then
@@ -29,6 +29,12 @@ unset XDG_CACHE_HOME
mkdir -p $TEST_HOME
export PATH=@bindir@:$PATH
if [[ -n "${NIX_CLIENT_PACKAGE:-}" ]]; then
export PATH="$NIX_CLIENT_PACKAGE/bin":$PATH
fi
if [[ -n "${NIX_DAEMON_PACKAGE:-}" ]]; then
export NIX_DAEMON_COMMAND="$NIX_DAEMON_PACKAGE/bin/nix-daemon"
fi
coreutils=@coreutils@
export dot=@dot@
@@ -57,7 +63,6 @@ clearStore() {
mkdir "$NIX_STORE_DIR"
rm -rf "$NIX_STATE_DIR"
mkdir "$NIX_STATE_DIR"
-nix-store --init
clearProfiles
}
@@ -73,7 +78,7 @@ startDaemon() {
# Start the daemon, wait for the socket to appear. !!!
# nix-daemon should have an option to fork into the background.
rm -f $NIX_STATE_DIR/daemon-socket/socket
-nix daemon &
+${NIX_DAEMON_COMMAND:-nix daemon} &
for ((i = 0; i < 30; i++)); do
if [ -e $NIX_DAEMON_SOCKET_PATH ]; then break; fi
sleep 1
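The two new hooks let the suite exercise a client from one Nix build against a daemon from another: `NIX_CLIENT_PACKAGE` is prepended to `PATH`, and `NIX_DAEMON_PACKAGE` selects the `nix-daemon` binary that `startDaemon` launches via `NIX_DAEMON_COMMAND`. A minimal sketch of driving this by hand, assuming `$old_nix` points at an installed older Nix package (the CI wiring itself is not part of this hunk):

```console
$ NIX_DAEMON_PACKAGE=$old_nix make installcheck
```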


@@ -1,15 +1,41 @@
source common.sh
# Isolate the home for this test.
# Other tests (e.g. flake registry tests) could be writing to $HOME in parallel.
export HOME=$TEST_ROOT/userhome
# Test that using XDG_CONFIG_HOME works
# Assert the config folder didn't exist initially.
[ ! -e "$HOME/.config" ]
# Without XDG_CONFIG_HOME, creates $HOME/.config
unset XDG_CONFIG_HOME
# Run against the nix registry to create the config dir
# (Tip: this relies on removing non-existent entries being a no-op!)
nix registry remove userhome-without-xdg
# Verify that it created $HOME/.config
[ -e "$HOME/.config" ]
# Remove the directory it created
rm -rf "$HOME/.config"
# Run the same test, but with XDG_CONFIG_HOME
export XDG_CONFIG_HOME=$TEST_ROOT/confighome
# Assert the XDG_CONFIG_HOME/nix path does not exist yet.
[ ! -e "$TEST_ROOT/confighome/nix" ]
nix registry remove userhome-with-xdg
# Verifies the confighome path has been created
[ -e "$TEST_ROOT/confighome/nix" ]
# Assert the .config folder hasn't been created.
[ ! -e "$HOME/.config" ]
# Test that files are loaded from XDG by default
-export XDG_CONFIG_HOME=/tmp/home
-export XDG_CONFIG_DIRS=/tmp/dir1:/tmp/dir2
+export XDG_CONFIG_HOME=$TEST_ROOT/confighome
+export XDG_CONFIG_DIRS=$TEST_ROOT/dir1:$TEST_ROOT/dir2
files=$(nix-build --verbose --version | grep "User config" | cut -d ':' -f2- | xargs)
-[[ $files == "/tmp/home/nix/nix.conf:/tmp/dir1/nix/nix.conf:/tmp/dir2/nix/nix.conf" ]]
+[[ $files == "$TEST_ROOT/confighome/nix/nix.conf:$TEST_ROOT/dir1/nix/nix.conf:$TEST_ROOT/dir2/nix/nix.conf" ]]
# Test that setting NIX_USER_CONF_FILES overrides all the default user config files
-export NIX_USER_CONF_FILES=/tmp/file1.conf:/tmp/file2.conf
+export NIX_USER_CONF_FILES=$TEST_ROOT/file1.conf:$TEST_ROOT/file2.conf
files=$(nix-build --verbose --version | grep "User config" | cut -d ':' -f2- | xargs)
-[[ $files == "/tmp/file1.conf:/tmp/file2.conf" ]]
+[[ $files == "$TEST_ROOT/file1.conf:$TEST_ROOT/file2.conf" ]]
# Test that it's possible to load the config from a custom location
here=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")

tests/db-migration.sh (new file, 26 lines)

@@ -0,0 +1,26 @@
# Test that we can successfully migrate from an older db schema
# Only run this if we have an older Nix available
# XXX: This assumes that the `daemon` package is older than the `client` one
if [[ -z "$NIX_DAEMON_PACKAGE" ]]; then
exit 0
fi
source common.sh
# Fill the db using the older Nix
PATH_WITH_NEW_NIX="$PATH"
export PATH="$NIX_DAEMON_PACKAGE/bin:$PATH"
clearStore
nix-build simple.nix --no-out-link
nix-store --generate-binary-cache-key cache1.example.org $TEST_ROOT/sk1 $TEST_ROOT/pk1
dependenciesOutPath=$(nix-build dependencies.nix --no-out-link --secret-key-files "$TEST_ROOT/sk1")
fixedOutPath=$(IMPURE_VAR1=foo IMPURE_VAR2=bar nix-build fixed.nix -A good.0 --no-out-link)
# Migrate to the new schema and ensure that everything's there
export PATH="$PATH_WITH_NEW_NIX"
info=$(nix path-info --json $dependenciesOutPath)
[[ $info =~ '"ultimate":true' ]]
[[ $info =~ 'cache1.example.org' ]]
nix verify -r "$fixedOutPath"
nix verify -r "$dependenciesOutPath" --sigs-needed 1 --trusted-public-keys $(cat $TEST_ROOT/pk1)


@@ -179,3 +179,13 @@ git clone --depth 1 file://$repo $TEST_ROOT/shallow
path6=$(nix eval --impure --raw --expr "(builtins.fetchTree { type = \"git\"; url = \"file://$TEST_ROOT/shallow\"; ref = \"dev\"; shallow = true; }).outPath")
[[ $path3 = $path6 ]]
[[ $(nix eval --impure --expr "(builtins.fetchTree { type = \"git\"; url = \"file://$TEST_ROOT/shallow\"; ref = \"dev\"; shallow = true; }).revCount or 123") == 123 ]]
# Explicit ref = "HEAD" should work, and produce the same outPath as without ref
path7=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repo\"; ref = \"HEAD\"; }).outPath")
path8=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repo\"; }).outPath")
[[ $path7 = $path8 ]]
# ref = "HEAD" should fetch the HEAD revision
rev4=$(git -C $repo rev-parse HEAD)
rev4_nix=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repo\"; ref = \"HEAD\"; }).rev")
[[ $rev4 = $rev4_nix ]]


@@ -25,6 +25,7 @@ templatesDir=$TEST_ROOT/templates
nonFlakeDir=$TEST_ROOT/nonFlake
flakeA=$TEST_ROOT/flakeA
flakeB=$TEST_ROOT/flakeB
+flakeGitBare=$TEST_ROOT/flakeGitBare
for repo in $flake1Dir $flake2Dir $flake3Dir $flake7Dir $templatesDir $nonFlakeDir $flakeA $flakeB; do
rm -rf $repo $repo.tmp
@@ -163,16 +164,17 @@ EOF
# Test 'nix flake list'.
[[ $(nix registry list | wc -l) == 7 ]]
-# Test 'nix flake info'.
-nix flake info flake1 | grep -q 'URL: .*flake1.*'
+# Test 'nix flake metadata'.
+nix flake metadata flake1
+nix flake metadata flake1 | grep -q 'Locked URL:.*flake1.*'
-# Test 'nix flake info' on a local flake.
-(cd $flake1Dir && nix flake info) | grep -q 'URL: .*flake1.*'
-(cd $flake1Dir && nix flake info .) | grep -q 'URL: .*flake1.*'
-nix flake info $flake1Dir | grep -q 'URL: .*flake1.*'
+# Test 'nix flake metadata' on a local flake.
+(cd $flake1Dir && nix flake metadata) | grep -q 'URL:.*flake1.*'
+(cd $flake1Dir && nix flake metadata .) | grep -q 'URL:.*flake1.*'
+nix flake metadata $flake1Dir | grep -q 'URL:.*flake1.*'
-# Test 'nix flake info --json'.
-json=$(nix flake info flake1 --json | jq .)
+# Test 'nix flake metadata --json'.
+json=$(nix flake metadata flake1 --json | jq .)
[[ $(echo "$json" | jq -r .description) = 'Bla bla' ]]
[[ -d $(echo "$json" | jq -r .path) ]]
[[ $(echo "$json" | jq -r .lastModified) = $(git -C $flake1Dir log -n1 --format=%ct) ]]
@@ -180,7 +182,7 @@ hash1=$(echo "$json" | jq -r .revision)
echo -n '# foo' >> $flake1Dir/flake.nix
git -C $flake1Dir commit -a -m 'Foo'
-hash2=$(nix flake info flake1 --json --refresh | jq -r .revision)
+hash2=$(nix flake metadata flake1 --json --refresh | jq -r .revision)
[[ $hash1 != $hash2 ]]
# Test 'nix build' on a flake.
@@ -604,6 +606,11 @@ nix flake update $flake3Dir
[[ $(jq -c .nodes.flake2.inputs.flake1 $flake3Dir/flake.lock) =~ '["foo"]' ]]
[[ $(jq .nodes.foo.locked.url $flake3Dir/flake.lock) =~ flake7 ]]
+# Test git+file with bare repo.
+rm -rf $flakeGitBare
+git clone --bare $flake1Dir $flakeGitBare
+nix build -o $TEST_ROOT/result git+file://$flakeGitBare
# Test Mercurial flakes.
rm -rf $flake5Dir
hg init $flake5Dir
@@ -624,7 +631,7 @@ hg commit --config ui.username=foobar@example.org $flake5Dir -m 'Initial commit'
nix build -o $TEST_ROOT/result hg+file://$flake5Dir
[[ -e $TEST_ROOT/result/hello ]]
-(! nix flake info --json hg+file://$flake5Dir | jq -e -r .revision)
+(! nix flake metadata --json hg+file://$flake5Dir | jq -e -r .revision)
nix eval hg+file://$flake5Dir#expr
@@ -632,13 +639,13 @@ nix eval hg+file://$flake5Dir#expr
(! nix eval hg+file://$flake5Dir#expr --no-allow-dirty)
-(! nix flake info --json hg+file://$flake5Dir | jq -e -r .revision)
+(! nix flake metadata --json hg+file://$flake5Dir | jq -e -r .revision)
hg commit --config ui.username=foobar@example.org $flake5Dir -m 'Add lock file'
-nix flake info --json hg+file://$flake5Dir --refresh | jq -e -r .revision
-nix flake info --json hg+file://$flake5Dir
-[[ $(nix flake info --json hg+file://$flake5Dir | jq -e -r .revCount) = 1 ]]
+nix flake metadata --json hg+file://$flake5Dir --refresh | jq -e -r .revision
+nix flake metadata --json hg+file://$flake5Dir
+[[ $(nix flake metadata --json hg+file://$flake5Dir | jq -e -r .revCount) = 1 ]]
nix build -o $TEST_ROOT/result hg+file://$flake5Dir --no-registries --no-allow-dirty
@@ -648,7 +655,7 @@ tar cfz $TEST_ROOT/flake.tar.gz -C $TEST_ROOT --exclude .hg flake5
nix build -o $TEST_ROOT/result file://$TEST_ROOT/flake.tar.gz
# Building with a tarball URL containing a SRI hash should also work.
-url=$(nix flake info --json file://$TEST_ROOT/flake.tar.gz | jq -r .url)
+url=$(nix flake metadata --json file://$TEST_ROOT/flake.tar.gz | jq -r .url)
[[ $url =~ sha256- ]]
nix build -o $TEST_ROOT/result $url
@@ -674,9 +681,8 @@ nix flake lock $flake3Dir
nix flake lock $flake3Dir --update-input flake2/flake1
[[ $(jq -r .nodes.flake1_2.locked.rev $flake3Dir/flake.lock) =~ $hash2 ]]
-# Test 'nix flake list-inputs'.
-[[ $(nix flake list-inputs $flake3Dir | wc -l) == 5 ]]
-nix flake list-inputs $flake3Dir --json | jq .
+# Test 'nix flake metadata --json'.
+nix flake metadata $flake3Dir --json | jq .
# Test circular flake dependencies.
cat > $flakeA/flake.nix <<EOF
@@ -715,4 +721,4 @@ git -C $flakeB commit -a -m 'Foo'
[[ $(nix eval --update-input b $flakeA#foo) = 1912 ]]
# Test list-inputs with circular dependencies
-nix flake list-inputs $flakeA
+nix flake metadata $flakeA


@@ -7,6 +7,7 @@ nix_tests = \
referrers.sh user-envs.sh logging.sh nix-build.sh misc.sh fixed.sh \
gc-runtime.sh check-refs.sh filter-source.sh \
local-store.sh remote-store.sh export.sh export-graph.sh \
+db-migration.sh \
timeout.sh secure-drv-outputs.sh nix-channel.sh \
multiple-outputs.sh import-derivation.sh fetchurl.sh optimise-store.sh \
binary-cache.sh \
@@ -17,6 +18,7 @@ nix_tests = \
linux-sandbox.sh \
build-dry.sh \
build-remote-input-addressed.sh \
+build-remote-content-addressed-fixed.sh \
build-remote-content-addressed-floating.sh \
ssh-relay.sh \
nar-access.sh \
@@ -38,11 +40,13 @@ nix_tests = \
recursive.sh \
describe-stores.sh \
flakes.sh \
-content-addressed.sh \
-nix-copy-content-addressed.sh \
text-hashed-output.sh \
build.sh \
-compute-levels.sh
+compute-levels.sh \
+ca/build.sh \
+ca/substitute.sh \
+ca/signatures.sh \
+ca/nix-copy.sh
# parallel.sh
install-tests += $(foreach x, $(nix_tests), tests/$(x))
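With these additions the content-addressed test scripts under `ca/` run as part of the normal test target, assuming the usual autotools-based checkout:

```console
$ make installcheck   # now also covers ca/build.sh, ca/substitute.sh, ca/signatures.sh and ca/nix-copy.sh
```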


@@ -1,4 +1,6 @@
#!/bin/sh
-echo Pushing "$@" to "$REMOTE_STORE"
-printf "%s" "$OUT_PATHS" | xargs -d: nix copy --to "$REMOTE_STORE" --no-require-sigs
+set -x
+echo Pushing "$OUT_PATHS" to "$REMOTE_STORE"
+printf "%s" "$DRV_PATH" | xargs nix copy --to "$REMOTE_STORE" --no-require-sigs


@@ -23,12 +23,12 @@ startDaemon
storeCleared=1 NIX_REMOTE_=$NIX_REMOTE $SHELL ./user-envs.sh
+nix-store --gc --max-freed 1K
nix-store --dump-db > $TEST_ROOT/d1
NIX_REMOTE= nix-store --dump-db > $TEST_ROOT/d2
cmp $TEST_ROOT/d1 $TEST_ROOT/d2
-nix-store --gc --max-freed 1K
killDaemon
user=$(whoami)