Merge branch 'master' into debug-merge

Ben Burdette 2021-11-25 08:53:59 -07:00
commit 64c4ba8f66
348 changed files with 20809 additions and 9834 deletions


@@ -3,7 +3,7 @@
 - Thanks for your contribution!
 - To remove the stale label, just leave a new comment.
 - _How to find the right people to ping?_ → [`git blame`](https://git-scm.com/docs/git-blame) to the rescue! (or GitHub's history and blame buttons.)
-- You can always ask for help on [our Discourse Forum](https://discourse.nixos.org/) or on the [#nixos IRC channel](https://webchat.freenode.net/#nixos).
+- You can always ask for help on [our Discourse Forum](https://discourse.nixos.org/) or on [Matrix - #nix:nixos.org](https://matrix.to/#/#nix:nixos.org).
 ## Suggestions for PRs

.github/workflows/backport.yml (new file)

@@ -0,0 +1,26 @@
+name: Backport
+on:
+  pull_request_target:
+    types: [closed, labeled]
+jobs:
+  backport:
+    name: Backport Pull Request
+    if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith('backport', github.event.label.name))
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+          # required to find all branches
+          fetch-depth: 0
+      - name: Create backport PRs
+        # should be kept in sync with `version`
+        uses: zeebe-io/backport-action@v0.0.7
+        with:
+          # Config README: https://github.com/zeebe-io/backport-action#backport-action
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          github_workspace: ${{ github.workspace }}
+          pull_description: |-
+            Bot-based backport to `${target_branch}`, triggered by a label in #${pull_number}.
+          # should be kept in sync with `uses`
+          version: v0.0.5


@@ -1,27 +1,32 @@
 name: "Test"
 on:
   pull_request:
   push:
 jobs:
   tests:
+    needs: [check_cachix]
     strategy:
       matrix:
         os: [ubuntu-latest, macos-latest]
     runs-on: ${{ matrix.os }}
+    timeout-minutes: 60
     steps:
-      - uses: actions/checkout@v2.3.4
+      - uses: actions/checkout@v2.4.0
        with:
          fetch-depth: 0
-      - uses: cachix/install-nix-action@v13
+      - uses: cachix/install-nix-action@v16
      - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
      - uses: cachix/cachix-action@v10
+        if: needs.check_cachix.outputs.secret == 'true'
        with:
          name: '${{ env.CACHIX_NAME }}'
          signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
          authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
-      #- run: nix flake check
-      - run: nix-build -A checks.$(if [[ `uname` = Linux ]]; then echo x86_64-linux; else echo x86_64-darwin; fi)
+      - run: nix-build -A checks.$(nix-instantiate --eval -E '(builtins.currentSystem)')
   check_cachix:
     name: Cachix secret present for installer tests
     runs-on: ubuntu-latest
@@ -33,6 +38,7 @@ jobs:
        env:
          _CACHIX_SECRETS: ${{ secrets.CACHIX_SIGNING_KEY }}${{ secrets.CACHIX_AUTH_TOKEN }}
        run: echo "::set-output name=secret::${{ env._CACHIX_SECRETS != '' }}"
   installer:
     needs: [tests, check_cachix]
     if: github.event_name == 'push' && needs.check_cachix.outputs.secret == 'true'
@@ -40,11 +46,11 @@ jobs:
     outputs:
       installerURL: ${{ steps.prepare-installer.outputs.installerURL }}
     steps:
-      - uses: actions/checkout@v2.3.4
+      - uses: actions/checkout@v2.4.0
        with:
          fetch-depth: 0
      - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
-      - uses: cachix/install-nix-action@v13
+      - uses: cachix/install-nix-action@v16
      - uses: cachix/cachix-action@v10
        with:
          name: '${{ env.CACHIX_NAME }}'
@@ -52,6 +58,7 @@ jobs:
          authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
      - id: prepare-installer
        run: scripts/prepare-installer-for-github-actions
   installer_test:
     needs: [installer, check_cachix]
     if: github.event_name == 'push' && needs.check_cachix.outputs.secret == 'true'
@@ -60,9 +67,9 @@ jobs:
        os: [ubuntu-latest, macos-latest]
     runs-on: ${{ matrix.os }}
     steps:
-      - uses: actions/checkout@v2.3.4
+      - uses: actions/checkout@v2.4.0
      - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
-      - uses: cachix/install-nix-action@v13
+      - uses: cachix/install-nix-action@v16
        with:
          install_url: '${{needs.installer.outputs.installerURL}}'
          install_options: "--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve"

.gitignore

@@ -15,6 +15,7 @@ perl/Makefile.config
 /doc/manual/*.1
 /doc/manual/*.5
 /doc/manual/*.8
+/doc/manual/generated/*
 /doc/manual/nix.json
 /doc/manual/conf-file.json
 /doc/manual/builtins.json
@@ -25,8 +26,6 @@ perl/Makefile.config
 # /scripts/
 /scripts/nix-profile.sh
-/scripts/nix-reduce-build
-/scripts/nix-http-export.cgi
 /scripts/nix-profile-daemon.sh
 # /src/libexpr/
@@ -39,6 +38,7 @@ perl/Makefile.config
 # /src/libstore/
 *.gen.*
+/src/libstore/tests/libstore-tests
 # /src/libutil/
 /src/libutil/tests/libutil-tests
@@ -56,9 +56,6 @@ perl/Makefile.config
 /src/nix-prefetch-url/nix-prefetch-url
-# /src/nix-daemon/
-/src/nix-daemon/nix-daemon
 /src/nix-collect-garbage/nix-collect-garbage
 # /src/nix-channel/
@@ -76,12 +73,12 @@ perl/Makefile.config
 # /tests/
 /tests/test-tmp
 /tests/common.sh
+/tests/dummy
 /tests/result*
 /tests/restricted-innocent
 /tests/shell
 /tests/shell.drv
 /tests/config.nix
+/tests/ca/config.nix
 # /tests/lang/
 /tests/lang/*.out


@@ -1 +1 @@
-2.4
+2.5


@@ -4,6 +4,7 @@ makefiles = \
   src/libutil/local.mk \
   src/libutil/tests/local.mk \
   src/libstore/local.mk \
+  src/libstore/tests/local.mk \
   src/libfetchers/local.mk \
   src/libmain/local.mk \
   src/libexpr/local.mk \
@@ -12,6 +13,8 @@ makefiles = \
   src/resolve-system-dependencies/local.mk \
   scripts/local.mk \
   misc/bash/local.mk \
+  misc/fish/local.mk \
+  misc/zsh/local.mk \
   misc/systemd/local.mk \
   misc/launchd/local.mk \
   misc/upstart/local.mk \
@@ -32,4 +35,4 @@ endif
 include mk/lib.mk
 # GLOBAL_CXXFLAGS += -g -Wall -include config.h -std=c++17 -fstack-usage
-GLOBAL_CXXFLAGS += -g -Wall -include config.h -std=c++17
+GLOBAL_CXXFLAGS += -g -Wall -include config.h -std=c++17 -I src


@@ -1,3 +1,4 @@
+HOST_OS = @host_os@
 AR = @AR@
 BDW_GC_LIBS = @BDW_GC_LIBS@
 BOOST_LDFLAGS = @BOOST_LDFLAGS@


@@ -28,7 +28,8 @@ build nix from source with nix-build or how to get a development environment.
 - [Nix manual](https://nixos.org/nix/manual)
 - [Nix jobsets on hydra.nixos.org](https://hydra.nixos.org/project/nix)
 - [NixOS Discourse](https://discourse.nixos.org/)
-- [IRC - #nixos on freenode.net](irc://irc.freenode.net/#nixos)
+- [Matrix - #nix:nixos.org](https://matrix.to/#/#nix:nixos.org)
+- [IRC - #nixos on libera.chat](irc://irc.libera.chat/#nixos)
 ## License


@@ -0,0 +1,45 @@
diff --git a/pthread_stop_world.c b/pthread_stop_world.c
index 4b2c429..1fb4c52 100644
--- a/pthread_stop_world.c
+++ b/pthread_stop_world.c
@@ -673,6 +673,8 @@ GC_INNER void GC_push_all_stacks(void)
struct GC_traced_stack_sect_s *traced_stack_sect;
pthread_t self = pthread_self();
word total_size = 0;
+ size_t stack_limit;
+ pthread_attr_t pattr;
if (!EXPECT(GC_thr_initialized, TRUE))
GC_thr_init();
@@ -722,6 +724,31 @@ GC_INNER void GC_push_all_stacks(void)
hi = p->altstack + p->altstack_size;
/* FIXME: Need to scan the normal stack too, but how ? */
/* FIXME: Assume stack grows down */
+ } else {
+ if (pthread_getattr_np(p->id, &pattr)) {
+ ABORT("GC_push_all_stacks: pthread_getattr_np failed!");
+ }
+ if (pthread_attr_getstacksize(&pattr, &stack_limit)) {
+ ABORT("GC_push_all_stacks: pthread_attr_getstacksize failed!");
+ }
+ if (pthread_attr_destroy(&pattr)) {
+ ABORT("GC_push_all_stacks: pthread_attr_destroy failed!");
+ }
+ // When a thread goes into a coroutine, we lose its original sp until
+ // control flow returns to the thread.
+ // While in the coroutine, the sp points outside the thread stack,
+ // so we can detect this and push the entire thread stack instead,
+ // as an approximation.
+ // We assume that the coroutine has similarly added its entire stack.
+ // This could be made accurate by cooperating with the application
+ // via new functions and/or callbacks.
+ #ifndef STACK_GROWS_UP
+ if (lo >= hi || lo < hi - stack_limit) { // sp outside stack
+ lo = hi - stack_limit;
+ }
+ #else
+ #error "STACK_GROWS_UP not supported in boost_coroutine2 (as of june 2021), so we don't support it in Nix."
+ #endif
}
GC_push_all_stack_sections(lo, hi, traced_stack_sect);
# ifdef STACK_GROWS_UP

config/config.guess

@@ -1,8 +1,8 @@
 #! /bin/sh
 # Attempt to guess a canonical system name.
-# Copyright 1992-2020 Free Software Foundation, Inc.
+# Copyright 1992-2021 Free Software Foundation, Inc.
-timestamp='2020-11-19'
+timestamp='2021-01-25'
 # This file is free software; you can redistribute it and/or modify it
 # under the terms of the GNU General Public License as published by
@@ -50,7 +50,7 @@ version="\
 GNU config.guess ($timestamp)
 Originally written by Per Bothner.
-Copyright 1992-2020 Free Software Foundation, Inc.
+Copyright 1992-2021 Free Software Foundation, Inc.
 This is free software; see the source for copying conditions. There is NO
 warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -188,10 +188,9 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
     #
     # Note: NetBSD doesn't particularly care about the vendor
     # portion of the name. We always set it to "unknown".
-    sysctl="sysctl -n hw.machine_arch"
     UNAME_MACHINE_ARCH=$( (uname -p 2>/dev/null || \
-        "/sbin/$sysctl" 2>/dev/null || \
-        "/usr/sbin/$sysctl" 2>/dev/null || \
+        /sbin/sysctl -n hw.machine_arch 2>/dev/null || \
+        /usr/sbin/sysctl -n hw.machine_arch 2>/dev/null || \
        echo unknown))
    case "$UNAME_MACHINE_ARCH" in
        aarch64eb) machine=aarch64_be-unknown ;;
@@ -996,6 +995,9 @@ EOF
    k1om:Linux:*:*)
        echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
        exit ;;
+    loongarch32:Linux:*:* | loongarch64:Linux:*:* | loongarchx32:Linux:*:*)
+        echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+        exit ;;
    m32r*:Linux:*:*)
        echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
        exit ;;
@@ -1084,7 +1086,7 @@ EOF
    ppcle:Linux:*:*)
        echo powerpcle-unknown-linux-"$LIBC"
        exit ;;
-    riscv32:Linux:*:* | riscv64:Linux:*:*)
+    riscv32:Linux:*:* | riscv32be:Linux:*:* | riscv64:Linux:*:* | riscv64be:Linux:*:*)
        echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
        exit ;;
    s390:Linux:*:* | s390x:Linux:*:*)
@@ -1480,8 +1482,8 @@ EOF
    i*86:rdos:*:*)
        echo "$UNAME_MACHINE"-pc-rdos
        exit ;;
-    i*86:AROS:*:*)
-        echo "$UNAME_MACHINE"-pc-aros
+    *:AROS:*:*)
+        echo "$UNAME_MACHINE"-unknown-aros
        exit ;;
    x86_64:VMkernel:*:*)
        echo "$UNAME_MACHINE"-unknown-esx

config/config.sub

@@ -1,8 +1,8 @@
 #! /bin/sh
 # Configuration validation subroutine script.
-# Copyright 1992-2020 Free Software Foundation, Inc.
+# Copyright 1992-2021 Free Software Foundation, Inc.
-timestamp='2020-12-02'
+timestamp='2021-01-08'
 # This file is free software; you can redistribute it and/or modify it
 # under the terms of the GNU General Public License as published by
@@ -67,7 +67,7 @@ Report bugs and patches to <config-patches@gnu.org>.
 version="\
 GNU config.sub ($timestamp)
-Copyright 1992-2020 Free Software Foundation, Inc.
+Copyright 1992-2021 Free Software Foundation, Inc.
 This is free software; see the source for copying conditions. There is NO
 warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -1185,6 +1185,7 @@ case $cpu-$vendor in
    | k1om \
    | le32 | le64 \
    | lm32 \
+    | loongarch32 | loongarch64 | loongarchx32 \
    | m32c | m32r | m32rle \
    | m5200 | m68000 | m680[012346]0 | m68360 | m683?2 | m68k \
    | m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x \
@@ -1229,7 +1230,7 @@ case $cpu-$vendor in
    | powerpc | powerpc64 | powerpc64le | powerpcle | powerpcspe \
    | pru \
    | pyramid \
-    | riscv | riscv32 | riscv64 \
+    | riscv | riscv32 | riscv32be | riscv64 | riscv64be \
    | rl78 | romp | rs6000 | rx \
    | s390 | s390x \
    | score \
@@ -1682,11 +1683,14 @@ fi
 # Now, validate our (potentially fixed-up) OS.
 case $os in
-    # Sometimes we do "kernel-abi", so those need to count as OSes.
+    # Sometimes we do "kernel-libc", so those need to count as OSes.
    musl* | newlib* | uclibc*)
        ;;
-    # Likewise for "kernel-libc"
-    eabi | eabihf | gnueabi | gnueabihf)
+    # Likewise for "kernel-abi"
+    eabi* | gnueabi*)
+        ;;
+    # VxWorks passes extra cpu info in the 4th filed.
+    simlinux | simwindows | spe)
        ;;
    # Now accept the basic system types.
    # The portable systems comes first.
@@ -1750,6 +1754,8 @@ case $kernel-$os in
        ;;
    kfreebsd*-gnu* | kopensolaris*-gnu*)
        ;;
+    vxworks-simlinux | vxworks-simwindows | vxworks-spe)
+        ;;
    nto-qnx*)
        ;;
    os2-emx)


@@ -1,4 +1,4 @@
-AC_INIT(nix, m4_esyscmd([bash -c "echo -n $(cat ./.version)$VERSION_SUFFIX"]))
+AC_INIT([nix],[m4_esyscmd(bash -c "echo -n $(cat ./.version)$VERSION_SUFFIX")])
 AC_CONFIG_MACRO_DIRS([m4])
 AC_CONFIG_SRCDIR(README.md)
 AC_CONFIG_AUX_DIR(config)
@@ -9,8 +9,7 @@ AC_PROG_SED
 AC_CANONICAL_HOST
 AC_MSG_CHECKING([for the canonical Nix system name])
-AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM],
-  [Platform identifier (e.g., `i686-linux').]),
+AC_ARG_WITH(system, AS_HELP_STRING([--with-system=SYSTEM],[Platform identifier (e.g., `i686-linux').]),
   [system=$withval],
   [case "$host_cpu" in
      i*86)
@@ -33,14 +32,6 @@ AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM],
         system="$machine_name-`echo $host_os | "$SED" -e's/@<:@0-9.@:>@*$//g'`";;
   esac])
-sys_name=$(uname -s | tr 'A-Z ' 'a-z_')
-case $sys_name in
-  cygwin*)
-    sys_name=cygwin
-    ;;
-esac
 AC_MSG_RESULT($system)
 AC_SUBST(system)
 AC_DEFINE_UNQUOTED(SYSTEM, ["$system"], [platform identifier ('cpu-os')])
@@ -64,10 +55,12 @@ AC_SYS_LARGEFILE
 # Solaris-specific stuff.
 AC_STRUCT_DIRENT_D_TYPE
-if test "$sys_name" = sunos; then
+case "$host_os" in
+  solaris*)
     # Solaris requires -lsocket -lnsl for network functions
-    LIBS="-lsocket -lnsl $LIBS"
-fi
+    LDFLAGS="-lsocket -lnsl $LDFLAGS"
+    ;;
+esac
 # Check for pubsetbuf.
@@ -127,8 +120,7 @@ NEED_PROG(jq, jq)
 AC_SUBST(coreutils, [$(dirname $(type -p cat))])
-AC_ARG_WITH(store-dir, AC_HELP_STRING([--with-store-dir=PATH],
-  [path of the Nix store (defaults to /nix/store)]),
+AC_ARG_WITH(store-dir, AS_HELP_STRING([--with-store-dir=PATH],[path of the Nix store (defaults to /nix/store)]),
   storedir=$withval, storedir='/nix/store')
 AC_SUBST(storedir)
@@ -152,13 +144,12 @@ int main() {
 }]])], GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=no, GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=yes)
 AC_MSG_RESULT($GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC)
 if test "x$GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC" = xyes; then
-    LIBS="-latomic $LIBS"
+    LDFLAGS="-latomic $LDFLAGS"
 fi
 PKG_PROG_PKG_CONFIG
-AC_ARG_ENABLE(shared, AC_HELP_STRING([--enable-shared],
-  [Build shared libraries for Nix [default=yes]]),
+AC_ARG_ENABLE(shared, AS_HELP_STRING([--enable-shared],[Build shared libraries for Nix [default=yes]]),
   shared=$enableval, shared=yes)
 if test "$shared" = yes; then
   AC_SUBST(BUILD_SHARED_LIBS, 1, [Whether to build shared libraries.])
@@ -213,30 +204,32 @@ AC_SUBST(HAVE_LIBCPUID, [$have_libcpuid])
 # Look for libseccomp, required for Linux sandboxing.
-if test "$sys_name" = linux; then
-  AC_ARG_ENABLE([seccomp-sandboxing],
-    AC_HELP_STRING([--disable-seccomp-sandboxing],
-    [Don't build support for seccomp sandboxing (only recommended if your arch doesn't support libseccomp yet!)]
-  ))
+case "$host_os" in
+  linux*)
+    AC_ARG_ENABLE([seccomp-sandboxing],
+      AS_HELP_STRING([--disable-seccomp-sandboxing],[Don't build support for seccomp sandboxing (only recommended if your arch doesn't support libseccomp yet!)
+      ]))
     if test "x$enable_seccomp_sandboxing" != "xno"; then
       PKG_CHECK_MODULES([LIBSECCOMP], [libseccomp],
        [CXXFLAGS="$LIBSECCOMP_CFLAGS $CXXFLAGS"])
      have_seccomp=1
      AC_DEFINE([HAVE_SECCOMP], [1], [Whether seccomp is available and should be used for sandboxing.])
    else
+      have_seccomp=
+    fi
+    ;;
+  *)
      have_seccomp=
-fi
-else
-  have_seccomp=
-fi
+    ;;
+esac
 AC_SUBST(HAVE_SECCOMP, [$have_seccomp])
 # Look for aws-cpp-sdk-s3.
 AC_LANG_PUSH(C++)
 AC_CHECK_HEADERS([aws/s3/S3Client.h],
-  [AC_DEFINE([ENABLE_S3], [1], [Whether to enable S3 support via aws-sdk-cpp.])
-  enable_s3=1], [enable_s3=])
+  [AC_DEFINE([ENABLE_S3], [1], [Whether to enable S3 support via aws-sdk-cpp.]) enable_s3=1],
+  [AC_DEFINE([ENABLE_S3], [0], [Whether to enable S3 support via aws-sdk-cpp.]) enable_s3=])
 AC_SUBST(ENABLE_S3, [$enable_s3])
 AC_LANG_POP(C++)
@@ -249,8 +242,7 @@ fi
 # Whether to use the Boehm garbage collector.
-AC_ARG_ENABLE(gc, AC_HELP_STRING([--enable-gc],
-  [enable garbage collection in the Nix expression evaluator (requires Boehm GC) [default=yes]]),
+AC_ARG_ENABLE(gc, AS_HELP_STRING([--enable-gc],[enable garbage collection in the Nix expression evaluator (requires Boehm GC) [default=yes]]),
   gc=$enableval, gc=yes)
 if test "$gc" = yes; then
   PKG_CHECK_MODULES([BDW_GC], [bdw-gc])
@@ -264,11 +256,12 @@ PKG_CHECK_MODULES([GTEST], [gtest_main])
 # documentation generation switch
-AC_ARG_ENABLE(doc-gen, AC_HELP_STRING([--disable-doc-gen],
-  [disable documentation generation]),
+AC_ARG_ENABLE(doc-gen, AS_HELP_STRING([--disable-doc-gen],[disable documentation generation]),
   doc_generate=$enableval, doc_generate=yes)
 AC_SUBST(doc_generate)
+# Look for lowdown library.
+PKG_CHECK_MODULES([LOWDOWN], [lowdown >= 0.8.0], [CXXFLAGS="$LOWDOWN_CFLAGS $CXXFLAGS"])
 # Setuid installations.
 AC_CHECK_FUNCS([setresuid setreuid lchown])
@@ -280,13 +273,14 @@ AC_CHECK_FUNCS([strsignal posix_fallocate sysconf])
 # This is needed if bzip2 is a static library, and the Nix libraries
 # are dynamic.
-if test "$(uname)" = "Darwin"; then
+case "${host_os}" in
+  darwin*)
     LDFLAGS="-all_load $LDFLAGS"
-fi
+    ;;
+esac
-AC_ARG_WITH(sandbox-shell, AC_HELP_STRING([--with-sandbox-shell=PATH],
-  [path of a statically-linked shell to use as /bin/sh in sandboxes]),
+AC_ARG_WITH(sandbox-shell, AS_HELP_STRING([--with-sandbox-shell=PATH],[path of a statically-linked shell to use as /bin/sh in sandboxes]),
   sandbox_shell=$withval)
 AC_SUBST(sandbox_shell)
@@ -301,6 +295,6 @@ done
 rm -f Makefile.config
-AC_CONFIG_HEADER([config.h])
+AC_CONFIG_HEADERS([config.h])
 AC_CONFIG_FILES([])
 AC_OUTPUT


@@ -6,9 +6,11 @@ builtins:
 concatStrings (map
   (name:
     let builtin = builtins.${name}; in
-    " - `builtins.${name}` " + concatStringsSep " " (map (s: "*${s}*") builtin.args)
-    + " \n\n"
-    + concatStrings (map (s: " ${s}\n") (splitLines builtin.doc)) + "\n\n"
+    "<dt><code>${name} "
+    + concatStringsSep " " (map (s: "<var>${s}</var>") builtin.args)
+    + "</code></dt>"
+    + "<dd>\n\n"
+    + builtin.doc
+    + "\n\n</dd>"
   )
   (attrNames builtins))


@@ -1,4 +1,4 @@
-command:
+{ command, renderLinks ? false }:
 with builtins;
 with import ./utils.nix;
@@ -20,7 +20,11 @@ let
   categories = sort (x: y: x.id < y.id) (unique (map (cmd: cmd.category) (attrValues def.commands)));
   listCommands = cmds:
     concatStrings (map (name:
-      "* [`${command} ${name}`](./${appendName filename name}.md) - ${cmds.${name}.description}\n")
+      "* "
+      + (if renderLinks
+         then "[`${command} ${name}`](./${appendName filename name}.md)"
+         else "`${command} ${name}`")
+      + " - ${cmds.${name}.description}\n")
       (attrNames cmds));
 in
 "where *subcommand* is one of the following:\n\n"
@@ -89,7 +93,7 @@ let
 in
 let
-  manpages = processCommand { filename = "nix"; command = "nix"; def = command; };
+  manpages = processCommand { filename = "nix"; command = "nix"; def = builtins.fromJSON command; };
   summary = concatStrings (map (manpage: " - [${manpage.command}](command-ref/new-cli/${manpage.name})\n") manpages);
 in
 (listToAttrs manpages) // { "SUMMARY.md" = summary; }

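For orientation, a minimal sketch (not part of the diff) of how the reworked generator is meant to be invoked, mirroring the call in doc/manual/local.mk further down; `./nix.json` stands in for the JSON description of the `nix` CLI:

```nix
# Sketch only: `command` is the raw JSON text describing the `nix` CLI
# (the generator now parses it itself via builtins.fromJSON), and
# `renderLinks = true` turns each listed subcommand into a Markdown link.
import ./generate-manpage.nix {
  command = builtins.readFile ./nix.json;
  renderLinks = true;
}
```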

@@ -1,7 +1,5 @@
 ifeq ($(doc_generate),yes)
-MANUAL_SRCS := $(call rwildcard, $(d)/src, *.md)
 # Generate man pages.
 man-pages := $(foreach n, \
   nix-env.1 nix-build.1 nix-shell.1 nix-store.1 nix-instantiate.1 \
@@ -46,7 +44,7 @@ $(d)/src/SUMMARY.md: $(d)/src/SUMMARY.md.in $(d)/src/command-ref/new-cli
 $(d)/src/command-ref/new-cli: $(d)/nix.json $(d)/generate-manpage.nix $(bindir)/nix
    @rm -rf $@
-    $(trace-gen) $(nix-eval) --write-to $@ --expr 'import doc/manual/generate-manpage.nix (builtins.fromJSON (builtins.readFile $<))'
+    $(trace-gen) $(nix-eval) --write-to $@ --expr 'import doc/manual/generate-manpage.nix { command = builtins.readFile $<; renderLinks = true; }'
 $(d)/src/command-ref/conf-file.md: $(d)/conf-file.json $(d)/generate-options.nix $(d)/src/command-ref/conf-file-prefix.md $(bindir)/nix
    @cat doc/manual/src/command-ref/conf-file-prefix.md > $@.tmp
@@ -64,6 +62,7 @@ $(d)/conf-file.json: $(bindir)/nix
 $(d)/src/expressions/builtins.md: $(d)/builtins.json $(d)/generate-builtins.nix $(d)/src/expressions/builtins-prefix.md $(bindir)/nix
    @cat doc/manual/src/expressions/builtins-prefix.md > $@.tmp
    $(trace-gen) $(nix-eval) --expr 'import doc/manual/generate-builtins.nix (builtins.fromJSON (builtins.readFile $<))' >> $@.tmp
+    @cat doc/manual/src/expressions/builtins-suffix.md >> $@.tmp
    @mv $@.tmp $@
 $(d)/builtins.json: $(bindir)/nix
@@ -74,17 +73,28 @@ $(d)/builtins.json: $(bindir)/nix
 install: $(docdir)/manual/index.html
 # Generate 'nix' manpages.
-install: $(d)/src/command-ref/new-cli
+install: $(mandir)/man1/nix3-manpages
+man: doc/manual/generated/man1/nix3-manpages
+all: doc/manual/generated/man1/nix3-manpages
+$(mandir)/man1/nix3-manpages: doc/manual/generated/man1/nix3-manpages
+    @mkdir -p $(DESTDIR)$$(dirname $@)
+    $(trace-install) install -m 0644 $$(dirname $<)/* $(DESTDIR)$$(dirname $@)
+doc/manual/generated/man1/nix3-manpages: $(d)/src/command-ref/new-cli
+    @mkdir -p $(DESTDIR)$$(dirname $@)
    $(trace-gen) for i in doc/manual/src/command-ref/new-cli/*.md; do \
        name=$$(basename $$i .md); \
+        tmpFile=$$(mktemp); \
        if [[ $$name = SUMMARY ]]; then continue; fi; \
-        printf "Title: %s\n\n" "$$name" > $$i.tmp; \
-        cat $$i >> $$i.tmp; \
-        lowdown -sT man -M section=1 $$i.tmp -o $(mandir)/man1/$$name.1; \
+        printf "Title: %s\n\n" "$$name" > $$tmpFile; \
+        cat $$i >> $$tmpFile; \
+        lowdown -sT man -M section=1 $$tmpFile -o $(DESTDIR)$$(dirname $@)/$$name.1; \
+        rm $$tmpFile; \
    done
+    @touch $@
-$(docdir)/manual/index.html: $(MANUAL_SRCS) $(d)/book.toml $(d)/custom.css $(d)/src/SUMMARY.md $(d)/src/command-ref/new-cli $(d)/src/command-ref/conf-file.md $(d)/src/expressions/builtins.md
+$(docdir)/manual/index.html: $(MANUAL_SRCS) $(d)/book.toml $(d)/custom.css $(d)/src/SUMMARY.md $(d)/src/command-ref/new-cli $(d)/src/command-ref/conf-file.md $(d)/src/expressions/builtins.md $(call rwildcard, $(d)/src, *.md)
-    $(trace-gen) RUST_LOG=warn mdbook build doc/manual -d $(docdir)/manual
+    $(trace-gen) RUST_LOG=warn mdbook build doc/manual -d $(DESTDIR)$(docdir)/manual
+    @cp doc/manual/highlight.pack.js $(docdir)/manual/highlight.js
 endif


@@ -9,6 +9,7 @@
 - [Prerequisites](installation/prerequisites-source.md)
 - [Obtaining a Source Distribution](installation/obtaining-source.md)
 - [Building Nix from Source](installation/building-source.md)
+- [Using Nix within Docker](installation/installing-docker.md)
 - [Security](installation/nix-security.md)
 - [Single-User Mode](installation/single-user.md)
 - [Multi-User Mode](installation/multi-user.md)
@@ -70,6 +71,8 @@
 - [Hacking](contributing/hacking.md)
 - [CLI guideline](contributing/cli-guideline.md)
 - [Release Notes](release-notes/release-notes.md)
+  - [Release X.Y (202?-??-??)](release-notes/rl-next.md)
+  - [Release 2.4 (2021-11-01)](release-notes/rl-2.4.md)
   - [Release 2.3 (2019-09-04)](release-notes/rl-2.3.md)
   - [Release 2.2 (2019-01-11)](release-notes/rl-2.2.md)
   - [Release 2.1 (2018-09-02)](release-notes/rl-2.1.md)


@@ -16,8 +16,9 @@ By default Nix reads settings from the following places:
   will be loaded in reverse order.
   Otherwise it will look for `nix/nix.conf` files in `XDG_CONFIG_DIRS`
-  and `XDG_CONFIG_HOME`. If these are unset, it will look in
-  `$HOME/.config/nix.conf`.
+  and `XDG_CONFIG_HOME`. If unset, `XDG_CONFIG_DIRS` defaults to
+  `/etc/xdg`, and `XDG_CONFIG_HOME` defaults to `$HOME/.config`
+  as per [XDG Base Directory Specification](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html).
 - If `NIX_CONFIG` is set, its contents is treated as the contents of
   a configuration file.


@@ -28,17 +28,21 @@ Most Nix commands interpret the following environment variables:
     consist of a single top-level directory. For example, setting
     `NIX_PATH` to
-        nixpkgs=https://github.com/NixOS/nixpkgs/archive/nixos-15.09.tar.gz
+        nixpkgs=https://github.com/NixOS/nixpkgs/archive/master.tar.gz
-    tells Nix to download the latest revision in the Nixpkgs/NixOS 15.09
-    channel.
-    A following shorthand can be used to refer to the official channels:
+    tells Nix to download and use the current contents of the
+    `master` branch in the `nixpkgs` repository.
+    The URLs of the tarballs from the official nixos.org channels (see
+    [the manual for `nix-channel`](nix-channel.md)) can be abbreviated
+    as `channel:<channel-name>`. For instance, the following two
+    values of `NIX_PATH` are equivalent:
-        nixpkgs=channel:nixos-15.09
+        nixpkgs=channel:nixos-21.05
+        nixpkgs=https://nixos.org/channels/nixos-21.05/nixexprs.tar.xz
-    The search path can be extended using the `-I` option, which takes
-    precedence over `NIX_PATH`.
+    The Nix search path can also be extended using the `-I` option to
+    many Nix commands, which takes precedence over `NIX_PATH`.
 - `NIX_IGNORE_SYMLINK_STORE`\
     Normally, the Nix store directory (typically `/nix/store`) is not


@@ -238,7 +238,16 @@ a number of possible ways:
 ## Examples
-To install a specific version of `gcc` from the active Nix expression:
+To install a package using a specific attribute path from the active Nix expression:
+
+```console
+$ nix-env -iA gcc40mips
+installing `gcc-4.0.2'
+$ nix-env -iA xorg.xorgserver
+installing `xorg-server-1.2.0'
+```
+
+To install a specific version of `gcc` using the derivation name:
 ```console
 $ nix-env --install gcc-3.3.2
@@ -246,6 +255,9 @@ installing `gcc-3.3.2'
 uninstalling `gcc-3.1'
 ```
+Using attribute path for selecting a package is preferred,
+as it is much faster and there will not be multiple matches.
 Note the previously installed version is removed, since
 `--preserve-installed` was not specified.
@@ -256,13 +268,6 @@ $ nix-env --install gcc
 installing `gcc-3.3.2'
 ```
-To install using a specific attribute:
-
-```console
-$ nix-env -i -A gcc40mips
-$ nix-env -i -A xorg.xorgserver
-```
 To install all derivations in the Nix expression `foo.nix`:
 ```console
@@ -374,22 +379,29 @@ For the other flags, see `--install`.
 ## Examples
 ```console
-$ nix-env --upgrade gcc
+$ nix-env --upgrade -A nixpkgs.gcc
 upgrading `gcc-3.3.1' to `gcc-3.4'
 ```
+When there are no updates available, nothing will happen:
 ```console
-$ nix-env -u gcc-3.3.2 --always (switch to a specific version)
+$ nix-env --upgrade -A nixpkgs.pan
+```
+
+Using `-A` is preferred when possible, as it is faster and unambiguous but
+it is also possible to upgrade to a specific version by matching the derivation name:
+
+```console
+$ nix-env -u gcc-3.3.2 --always
 upgrading `gcc-3.4' to `gcc-3.3.2'
 ```
-```console
-$ nix-env --upgrade pan
-(no upgrades available, so nothing happens)
-```
+To try to upgrade everything
+(matching packages based on the part of the derivation name without version):
 ```console
-$ nix-env -u (try to upgrade everything)
+$ nix-env -u
 upgrading `hello-2.1.2' to `hello-2.1.3'
 upgrading `mozilla-1.2' to `mozilla-1.4'
 ```
@@ -401,7 +413,7 @@ of a derivation `x` by looking at their respective `name` attributes.
 The names (e.g., `gcc-3.3.1` are split into two parts: the package name
 (`gcc`), and the version (`3.3.1`). The version part starts after the
 first dash not followed by a letter. `x` is considered an upgrade of `y`
-if their package names match, and the version of `y` is higher that that
+if their package names match, and the version of `y` is higher than that
 of `x`.
 The versions are compared by splitting them into contiguous components

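As an aside, essentially the same component-wise ordering described above is exposed to Nix expressions as `builtins.compareVersions`; a small illustration (expected results shown in the comments):

```nix
# compareVersions returns -1, 0 or 1; versions are split into contiguous
# components and compared piecewise, so "3.4" is considered newer than "3.3.2".
{
  older = builtins.compareVersions "3.3.1" "3.4";   # -1
  newer = builtins.compareVersions "3.4" "3.3.2";   #  1
  same  = builtins.compareVersions "2.1.2" "2.1.2"; #  0
}
```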

@@ -11,8 +11,8 @@
 [`--command` *cmd*]
 [`--run` *cmd*]
 [`--exclude` *regexp*]
-[--pure]
-[--keep *name*]
+[`--pure`]
+[`--keep` *name*]
 {{`--packages` | `-p`} {*packages* | *expressions*} … | [*path*]}
 # Description
@@ -78,9 +78,7 @@ All options not listed here are passed to `nix-store
   cleared before the interactive shell is started, so you get an
   environment that more closely corresponds to the “real” Nix build. A
   few variables, in particular `HOME`, `USER` and `DISPLAY`, are
-  retained. Note that (depending on your Bash
-  installation) `/etc/bashrc` is still sourced, so any variables set
-  there will affect the interactive shell.
+  retained.
 - `--packages` / `-p` *packages*…\
   Set up an environment in which the specified packages are present.
@@ -112,13 +110,19 @@ shell in which to build it:
 ```console
 $ nix-shell '<nixpkgs>' -A pan
-[nix-shell]$ unpackPhase
+[nix-shell]$ eval ${unpackPhase:-unpackPhase}
 [nix-shell]$ cd pan-*
-[nix-shell]$ configurePhase
-[nix-shell]$ buildPhase
+[nix-shell]$ eval ${configurePhase:-configurePhase}
+[nix-shell]$ eval ${buildPhase:-buildPhase}
 [nix-shell]$ ./pan/gui/pan
 ```
+The reason we use form `eval ${configurePhase:-configurePhase}` here is because
+those packages that override these phases do so by exporting the overridden
+values in the environment variable of the same name.
+Here bash is being told to either evaluate the contents of 'configurePhase',
+if it exists as a variable, otherwise evaluate the configurePhase function.
 To clear the environment first, and do some additional automatic
 initialisation of the interactive shell:


@@ -125,7 +125,7 @@ Special exit codes:
 - `104`\
   Not deterministic, the build succeeded in check mode but the
-  resulting output is not binary reproducable.
+  resulting output is not binary reproducible.
 With the `--keep-going` flag it's possible for multiple failures to
 occur, in this case the 1xx status codes are or combined using binary


@@ -162,11 +162,11 @@ Most Nix commands accept the following command-line options:
   }: ...
   ```
-  So if you call this Nix expression (e.g., when you do `nix-env -i
+  So if you call this Nix expression (e.g., when you do `nix-env -iA
   pkgname`), the function will be called automatically using the
   value [`builtins.currentSystem`](../expressions/builtins.md) for
   the `system` argument. You can override this using `--arg`, e.g.,
-  `nix-env -i pkgname --arg system \"i686-freebsd\"`. (Note that
+  `nix-env -iA pkgname --arg system \"i686-freebsd\"`. (Note that
   since the argument is a Nix string literal, you have to escape the
   quotes.)

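For context, a minimal sketch (illustrative names only) of the call pattern this passage describes: a top-level function whose `system` argument defaults to the current platform and is overridden by `--arg system ...`:

```nix
# Illustrative only. Without --arg, `system` is filled in with
# builtins.currentSystem; `--arg system "i686-freebsd"` substitutes
# the given value instead.
{ system ? builtins.currentSystem }:

{
  inherit system;
  # ... package definitions for the chosen system would go here ...
}
```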

@@ -3,7 +3,7 @@
 ## Goals
 Purpose of this document is to provide a clear direction to **help design
-delightful command line** experience. This document contain guidelines to
+delightful command line** experience. This document contains guidelines to
 follow to ensure a consistent and approachable user experience.
 ## Overview
@@ -115,7 +115,7 @@ The rules are:
 - Help is shown by using `--help` or `help` command (eg `nix` `--``help` or
   `nix help`).
 - For non-COMMANDs (eg. `nix` `--``help` and `nix store` `--``help`) we **show
   a summary** of most common use cases. Summary is presented on the STDOUT
   without any use of PAGER.
 - For COMMANDs (eg. `nix init` `--``help` or `nix help init`) we display the
@@ -176,7 +176,7 @@ $ nix init --template=template#pyton
 ------------------------------------------------------------------------
   Initializing Nix project at `/path/to/here`.
   Select a template for you new project:
-  |> template#pyton
+  |> template#python
      template#python-pip
      template#python-poetry
 ```
@@ -230,8 +230,8 @@ Now **Learn** part of the output is where you educate users. You should only
 show it when you know that a build will take some time and not annoy users of
 the builds that take only few seconds.
-Every feature like this should go though a intensive review and testing to
-collect as much a feedback as possible and to fine tune every little detail. If
+Every feature like this should go through an intensive review and testing to
+collect as much feedback as possible and to fine tune every little detail. If
 done right this can be an awesome features beginners and advance users will
 love, but if not done perfectly it will annoy users and leave bad impression.
@@ -240,7 +240,7 @@ love, but if not done perfectly it will annoy users and leave bad impression.
 Input to a command is provided via `ARGUMENTS` and `OPTIONS`.
 `ARGUMENTS` represent a required input for a function. When choosing to use
-`ARGUMENT` over function please be aware of the downsides that come with it:
+`ARGUMENTS` over `OPTIONS` please be aware of the downsides that come with it:
 - User will need to remember the order of `ARGUMENTS`. This is not a problem if
   there is only one `ARGUMENT`.
@@ -253,7 +253,7 @@ developer consider the downsides and choose wisely.
 ## Naming the `OPTIONS`
-Then only naming convention - apart from the ones mentioned in Naming the
+The only naming convention - apart from the ones mentioned in Naming the
 `COMMANDS` section is how flags are named.
 Flags are a type of `OPTION` that represent an option that can be turned ON of
@@ -271,12 +271,12 @@ to improve the discoverability of possible input. A new user will most likely
 not know which `ARGUMENTS` and `OPTIONS` are required or which values are
 possible for those options.
-In cases, the user might not provide the input or they provide wrong input,
-rather then show the error, prompt a user with an option to find and select
+In case the user does not provide the input or they provide wrong input,
+rather than show the error, prompt a user with an option to find and select
 correct input (see examples).
 Prompting is of course not required when TTY is not attached to STDIN. This
-would mean that scripts wont need to handle prompt, but rather handle errors.
+would mean that scripts won't need to handle prompt, but rather handle errors.
 A place to use prompt and provide user with interactive select
@@ -300,7 +300,7 @@ going to happen.
 ```shell
 $ nix build --option substitutors https://cache.example.org
 ------------------------------------------------------------------------
-  Warning! A security related question need to be answered.
+  Warning! A security related question needs to be answered.
 ------------------------------------------------------------------------
   The following substitutors will be used to in `my-project`:
     - https://cache.example.org
@@ -311,14 +311,14 @@ $ nix build --option substitutors https://cache.example.org
 # Output
-Terminal output can be quite limiting in many ways. Which should forces us to
+Terminal output can be quite limiting in many ways. Which should force us to
 think about the experience even more. As with every design the output is a
 compromise between being terse and being verbose, between showing help to
 beginners and annoying advance users. For this it is important that we know
 what are the priorities.
 Nix command line should be first and foremost written with beginners in mind.
-But users wont stay beginners for long and what was once useful might quickly
+But users won't stay beginners for long and what was once useful might quickly
 become annoying. There is no golden rule that we can give in this guideline
 that would make it easier how to draw a line and find best compromise.
@@ -342,7 +342,7 @@ also allowing them to redirect content to a file. For example:
 ```shell
 $ nix build > build.txt
 ------------------------------------------------------------------------
-  Error! Atrribute `bin` missing at (1:94) from string.
+  Error! Attribute `bin` missing at (1:94) from string.
 ------------------------------------------------------------------------
   1| with import <nixpkgs> { }; (pkgs.runCommandCC or pkgs.runCommand) "shell" { buildInputs = [ (surge.bin) ]; } ""
@@ -508,7 +508,7 @@ can, with a few key strokes, be changed into and advance introspection tool.
 ### Progress
-For longer running commands we should provide and overview of the progress.
+For longer running commands we should provide and overview the progress.
 This is shown best in `nix build` example:
 ```shell
@@ -553,7 +553,7 @@ going to happen.
 ```shell
 $ nix build --option substitutors https://cache.example.org
 ------------------------------------------------------------------------
-  Warning! A security related question need to be answered.
+  Warning! A security related question needs to be answered.
 ------------------------------------------------------------------------
   The following substitutors will be used to in `my-project`:
     - https://cache.example.org


@@ -237,7 +237,7 @@ Derivations can declare some infrequently used optional attributes.
 - `preferLocalBuild`\
   If this attribute is set to `true` and [distributed building is
   enabled](../advanced-topics/distributed-builds.md), then, if
-  possible, the derivaton will be built locally instead of forwarded
+  possible, the derivation will be built locally instead of forwarded
   to a remote machine. This is appropriate for trivial builders
   where the cost of doing a download or remote build would exceed
   the cost of building locally.


@@ -9,7 +9,8 @@ scope. Instead, you can access them through the `builtins` built-in
 value, which is a set that contains all built-in functions and values.
 For instance, `derivation` is also available as `builtins.derivation`.
-- `derivation` *attrs*; `builtins.derivation` *attrs*\
-  `derivation` is described in [its own section](derivations.md).
+<dl>
+<dt><code>derivation <var>attrs</var></code>;
+<code>builtins.derivation <var>attrs</var></code></dt>
+<dd><p><var>derivation</var> is described in
+<a href="derivations.md">its own section</a>.</p></dd>


@@ -0,0 +1 @@
+</dl>


@@ -26,7 +26,7 @@ elements (referenced from the figure by number):
   called with three arguments: `stdenv`, `fetchurl`, and `perl`. They
   are needed to build Hello, but we don't know how to build them here;
   that's why they are function arguments. `stdenv` is a package that
-  is used by almost all Nix Packages packages; it provides a
+  is used by almost all Nix Packages; it provides a
   “standard” environment consisting of the things you would expect
   in a basic Unix environment: a C/C++ compiler (GCC, to be precise),
   the Bash shell, fundamental Unix tools such as `cp`, `grep`, `tar`,

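For readers without the figure at hand, a rough sketch of the kind of expression this passage walks through (the hash is a placeholder, not the real checksum):

```nix
# A package function taking stdenv, fetchurl and perl as arguments,
# as described above; sha256 below is a dummy value.
{ stdenv, fetchurl, perl }:

stdenv.mkDerivation {
  name = "hello-2.1.1";
  src = fetchurl {
    url = "mirror://gnu/hello/hello-2.1.1.tar.gz";
    sha256 = "0000000000000000000000000000000000000000000000000000";
  };
  buildInputs = [ perl ];
}
```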

@@ -17,12 +17,12 @@ order of precedence (from strongest to weakest binding).
 | String Concatenation | *string1* `+` *string2* | left | String concatenation. | 7 |
 | Not | `!` *e* | none | Boolean negation. | 8 |
 | Update | *e1* `//` *e2* | right | Return a set consisting of the attributes in *e1* and *e2* (with the latter taking precedence over the former in case of equally named attributes). | 9 |
-| Less Than | *e1* `<` *e2*, | none | Arithmetic comparison. | 10 |
-| Less Than or Equal To | *e1* `<=` *e2* | none | Arithmetic comparison. | 10 |
-| Greater Than | *e1* `>` *e2* | none | Arithmetic comparison. | 10 |
-| Greater Than or Equal To | *e1* `>=` *e2* | none | Arithmetic comparison. | 10 |
+| Less Than | *e1* `<` *e2*, | none | Arithmetic/lexicographic comparison. | 10 |
+| Less Than or Equal To | *e1* `<=` *e2* | none | Arithmetic/lexicographic comparison. | 10 |
+| Greater Than | *e1* `>` *e2* | none | Arithmetic/lexicographic comparison. | 10 |
+| Greater Than or Equal To | *e1* `>=` *e2* | none | Arithmetic/lexicographic comparison. | 10 |
 | Equality | *e1* `==` *e2* | none | Equality. | 11 |
 | Inequality | *e1* `!=` *e2* | none | Inequality. | 11 |
 | Logical AND | *e1* `&&` *e2* | left | Logical AND. | 12 |
-| Logical OR | *e1* `\|\|` *e2* | left | Logical OR. | 13 |
-| Logical Implication | *e1* `->` *e2* | none | Logical implication (equivalent to `!e1 \|\| e2`). | 14 |
+| Logical OR | *e1* <code>&#124;&#124;</code> *e2* | left | Logical OR. | 13 |
+| Logical Implication | *e1* `->` *e2* | none | Logical implication (equivalent to <code>!e1 &#124;&#124; e2</code>). | 14 |

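A few small evaluations illustrating the table entries touched by this change; the comparison operators order strings lexicographically as well as numbers arithmetically, which is what the reworded descriptions spell out:

```nix
# Each attribute evaluates to the value given in its comment.
{
  update  = { a = 1; b = 2; } // { b = 3; };  # { a = 1; b = 3; }
  numeric = 1 < 2;                            # true (arithmetic comparison)
  lexico  = "abc" < "abd";                    # true (lexicographic comparison)
  implies = false -> true;                    # true (equivalent to !e1 || e2)
}
```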

@@ -64,7 +64,7 @@ Nix has the following basic data types:
     the start of each line. To be precise, it strips from each line a
     number of spaces equal to the minimal indentation of the string as a
     whole (disregarding the indentation of empty lines). For instance,
-    the first and second line are indented two space, while the third
+    the first and second line are indented two spaces, while the third
     line is indented four spaces. Thus, two spaces are stripped from
     each line, so the resulting string is
@@ -139,6 +139,13 @@ Nix has the following basic data types:
     environment variable `NIX_PATH` will be searched for the given file
     or directory name.
+    Antiquotation is supported in any paths except those in angle brackets.
+    `./${foo}-${bar}.nix` is a more convenient way of writing
+    `./. + "/" + foo + "-" + bar + ".nix"` or `./. + "/${foo}-${bar}.nix"`. At
+    least one slash must appear *before* any antiquotations for this to be
+    recognized as a path. `a.${foo}/b.${bar}` is a syntactically valid division
+    operation. `./a.${foo}/b.${bar}` is a path.
 - *Booleans* with values `true` and `false`.
 - The null value, denoted as `null`.

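A small sketch of the path-antiquotation rules added above (the file names are made up and need not exist):

```nix
# With foo = "a" and bar = "b", the three bindings below denote the same path,
# while an expression like a.${foo}/b.${bar} (no leading ./) parses as division.
let
  foo = "a";
  bar = "b";
  p1 = ./${foo}-${bar}.nix;                  # path ./a-b.nix
  p2 = ./. + "/" + foo + "-" + bar + ".nix";
  p3 = ./. + "/${foo}-${bar}.nix";
in [ p1 p2 p3 ]
```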

@@ -1,6 +1,6 @@
 # Building and Testing
-You can now try to build Hello. Of course, you could do `nix-env -i
+You can now try to build Hello. Of course, you could do `nix-env -f . -iA
 hello`, but you may not want to install a possibly broken package just
 yet. The best way to test the package is by using the command
 `nix-build`, which builds a Nix expression and creates a symlink named

View file

@ -1,9 +1,9 @@
# Building Nix from Source # Building Nix from Source
After unpacking or checking out the Nix sources, issue the following After cloning Nix's Git repository, issue the following commands:
commands:
```console ```console
$ ./bootstrap.sh
$ ./configure options... $ ./configure options...
$ make $ make
$ make install $ make install
@ -11,13 +11,6 @@ $ make install
Nix requires GNU Make so you may need to invoke `gmake` instead. Nix requires GNU Make so you may need to invoke `gmake` instead.
When building from the Git repository, these should be preceded by the
command:
```console
$ ./bootstrap.sh
```
The installation path can be specified by passing the `--prefix=prefix` The installation path can be specified by passing the `--prefix=prefix`
to `configure`. The default installation directory is `/usr/local`. You to `configure`. The default installation directory is `/usr/local`. You
can change this to any location you like. You must have write permission can change this to any location you like. You must have write permission

View file

@ -40,7 +40,7 @@ export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt
> **Note** > **Note**
> >
> You must not add the export and then do the install, as the Nix > You must not add the export and then do the install, as the Nix
> installer will detect the presense of Nix configuration, and abort. > installer will detect the presence of Nix configuration, and abort.
## `NIX_SSL_CERT_FILE` with macOS and the Nix daemon ## `NIX_SSL_CERT_FILE` with macOS and the Nix daemon

View file

@ -1,18 +1,26 @@
# Installing a Binary Distribution # Installing a Binary Distribution
If you are using Linux or macOS versions up to 10.14 (Mojave), the The easiest way to install Nix is to run the following command:
easiest way to install Nix is to run the following command:
```console ```console
$ sh <(curl -L https://nixos.org/nix/install) $ sh <(curl -L https://nixos.org/nix/install)
``` ```
If you're using macOS 10.15 (Catalina) or newer, consult [the macOS This will run the installer interactively (causing it to explain what
installation instructions](#macos-installation) before installing. it is doing more explicitly), and perform the default "type" of install
for your platform:
- single-user on Linux
- multi-user on macOS
As of Nix 2.1.0, the Nix installer will always default to creating a > **Notes on read-only filesystem root in macOS 10.15 Catalina +**
single-user installation, however opting in to the multi-user >
installation is highly recommended. > - It took some time to support this cleanly. You may see posts,
> examples, and tutorials using obsolete workarounds.
> - Supporting it cleanly made macOS installs too complex to qualify
> as single-user, so this type is no longer supported on macOS.
We recommend the multi-user install if it supports your platform and
you can authenticate with `sudo`.
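As a sketch (assuming a Linux system running systemd, or macOS), the multi-user installation can be requested by passing the installer's `--daemon` flag:

```console
$ sh <(curl -L https://nixos.org/nix/install) --daemon
```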
# Single User Installation # Single User Installation
@ -50,9 +58,9 @@ $ rm -rf /nix
The multi-user Nix installation creates system users, and a system The multi-user Nix installation creates system users, and a system
service for the Nix daemon. service for the Nix daemon.
- Linux running systemd, with SELinux disabled **Supported Systems**
- Linux running systemd, with SELinux disabled
- macOS - macOS
You can instruct the installer to perform a multi-user installation on You can instruct the installer to perform a multi-user installation on
your system: your system:
@ -96,165 +104,28 @@ sudo rm /Library/LaunchDaemons/org.nixos.nix-daemon.plist
There may also be references to Nix in `/etc/profile`, `/etc/bashrc`, There may also be references to Nix in `/etc/profile`, `/etc/bashrc`,
and `/etc/zshrc` which you may remove. and `/etc/zshrc` which you may remove.
# macOS Installation # macOS Installation <a name="sect-macos-installation-change-store-prefix"></a><a name="sect-macos-installation-encrypted-volume"></a><a name="sect-macos-installation-symlink"></a><a name="sect-macos-installation-recommended-notes"></a>
<!-- Note: anchors above to catch permalinks to old explanations -->
Starting with macOS 10.15 (Catalina), the root filesystem is read-only. We believe we have ironed out how to cleanly support the read-only root
This means `/nix` can no longer live on your system volume, and that on modern macOS. New installs will do this automatically, and you can
you'll need a workaround to install Nix. also re-run a new installer to convert your existing setup.
The recommended approach, which creates an unencrypted APFS volume for This section previously detailed the situation, options, and trade-offs,
your Nix store and a "synthetic" empty directory to mount it over at but it now only outlines what the installer does. You don't need to know
`/nix`, is least likely to impair Nix or your system. this to run the installer, but it may help if you run into trouble:
> **Note** - create a new APFS volume for your Nix store
> - update `/etc/synthetic.conf` to direct macOS to create a "synthetic"
> With all separate-volume approaches, it's possible something on your empty root directory to mount your volume
> system (particularly daemons/services and restored apps) may need - specify mount options for the volume in `/etc/fstab`
> access to your Nix store before the volume is mounted. Adding - if you have FileVault enabled
> additional encryption makes this more likely. - generate an encryption password
- put it in your system Keychain
If you're using a recent Mac with a [T2 - use it to encrypt the volume
chip](https://www.apple.com/euro/mac/shared/docs/Apple_T2_Security_Chip_Overview.pdf), - create a system LaunchDaemon to mount this volume early enough in the
your drive will still be encrypted at rest (in which case "unencrypted" boot process to avoid problems loading or restoring any programs that
is a bit of a misnomer). To use this approach, just install Nix with: need access to your Nix store
```console
$ sh <(curl -L https://nixos.org/nix/install) --darwin-use-unencrypted-nix-store-volume
```
If you don't like the sound of this, you'll want to weigh the other
approaches and tradeoffs detailed in this section.
> **Note**
>
> All of the known workarounds have drawbacks, but we hope better
> solutions will be available in the future. Some that we have our eye
> on are:
>
> 1. A true firmlink would enable the Nix store to live on the primary
> data volume without the build problems caused by the symlink
> approach. End users cannot currently create true firmlinks.
>
> 2. If the Nix store volume shared FileVault encryption with the
> primary data volume (probably by using the same volume group and
> role), FileVault encryption could be easily supported by the
> installer without requiring manual setup by each user.
## Change the Nix store path prefix
Changing the default prefix for the Nix store is a simple approach which
enables you to leave it on your root volume, where it can take full
advantage of FileVault encryption if enabled. Unfortunately, this
approach also opts your device out of some benefits that are enabled by
using the same prefix across systems:
- Your system won't be able to take advantage of the binary cache
(unless someone is able to stand up and support duplicate caching
infrastructure), which means you'll spend more time waiting for
builds.
- It's harder to build and deploy packages to Linux systems.
It would also be possible (and often requested) to just apply this change
ecosystem-wide, but it's an intrusive process that has side effects we
want to avoid for now.
## Use a separate encrypted volume
If you like, you can also add encryption to the recommended approach
taken by the installer. You can do this by pre-creating an encrypted
volume before you run the installer--or you can run the installer and
encrypt the volume it creates later.
In either case, adding encryption to a second volume isn't quite as
simple as enabling FileVault for your boot volume. Before you dive in,
there are a few things to weigh:
1. The additional volume won't be encrypted with your existing
FileVault key, so you'll need another mechanism to decrypt the
volume.
2. You can store the password in Keychain to automatically decrypt the
volume on boot--but it'll have to wait on Keychain and may not mount
before your GUI apps restore. If any of your launchd agents or apps
depend on Nix-installed software (for example, if you use a
Nix-installed login shell), the restore may fail or break.
On a case-by-case basis, you may be able to work around this problem
by using `wait4path` to block execution until your executable is
available.
It's also possible to decrypt and mount the volume earlier with a
login hook--but this mechanism appears to be deprecated and its
future is unclear.
3. You can hard-code the password in the clear, so that your store
volume can be decrypted before Keychain is available.
If you are comfortable navigating these tradeoffs, you can encrypt the
volume with something along the lines of:
```console
$ diskutil apfs enableFileVault /nix -user disk
```
## Symlink the Nix store to a custom location
Another simple approach is using `/etc/synthetic.conf` to symlink the
Nix store to the data volume. This option also enables your store to
share any configured FileVault encryption. Unfortunately, builds that
resolve the symlink may leak the canonical path or even fail.
Because of these downsides, we can't recommend this approach.
## Notes on the recommended approach
This section goes into a little more detail on the recommended approach.
You don't need to understand it to run the installer, but it can serve
as a helpful reference if you run into trouble.
1. In order to compose user-writable locations into the new read-only
system root, Apple introduced a new concept called `firmlinks`,
which it describes as a "bi-directional wormhole" between two
filesystems. You can see the current firmlinks in
`/usr/share/firmlinks`. Unfortunately, firmlinks aren't (currently?)
user-configurable.
For special cases like NFS mount points or package manager roots,
[synthetic.conf(5)](https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man5/synthetic.conf.5.html)
supports limited user-controlled file-creation (of symlinks, and
synthetic empty directories) at `/`. To create a synthetic empty
directory for mounting at `/nix`, add the following line to
`/etc/synthetic.conf` (create it if necessary):
nix
2. This configuration is applied at boot time, but you can use
`apfs.util` to trigger creation (not deletion) of new entries
without a reboot:
```console
$ /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -B
```
3. Create the new APFS volume with diskutil:
```console
$ sudo diskutil apfs addVolume diskX APFS 'Nix Store' -mountpoint /nix
```
4. Using `vifs`, add the new mount to `/etc/fstab`. If it doesn't
already have other entries, it should look something like:
#
# Warning - this file should only be modified with vifs(8)
#
# Failure to do so is unsupported and may be destructive.
#
LABEL=Nix\040Store /nix apfs rw,nobrowse
The nobrowse setting will keep Spotlight from indexing this volume,
and keep it from showing up on your desktop.
# Installing a pinned Nix version from a URL # Installing a pinned Nix version from a URL

View file

@ -0,0 +1,59 @@
# Using Nix within Docker
To run the latest stable release of Nix with Docker run the following command:
```console
$ docker run -ti nixos/nix
Unable to find image 'nixos/nix:latest' locally
latest: Pulling from nixos/nix
5843afab3874: Pull complete
b52bf13f109c: Pull complete
1e2415612aa3: Pull complete
Digest: sha256:27f6e7f60227e959ee7ece361f75d4844a40e1cc6878b6868fe30140420031ff
Status: Downloaded newer image for nixos/nix:latest
35ca4ada6e96:/# nix --version
nix (Nix) 2.3.12
35ca4ada6e96:/# exit
```
# What is included in Nix's Docker image?
The official Docker image is created using `pkgs.dockerTools.buildLayeredImage`
(and not with a `Dockerfile`, as is usual with Docker images). You can still
base your custom Docker image on it as you would with any other Docker image.
The Docker image is also not based on any other image and includes a minimal
set of runtime dependencies required to use Nix:
- pkgs.nix
- pkgs.bashInteractive
- pkgs.coreutils-full
- pkgs.gnutar
- pkgs.gzip
- pkgs.gnugrep
- pkgs.which
- pkgs.curl
- pkgs.less
- pkgs.wget
- pkgs.man
- pkgs.cacert.out
- pkgs.findutils
# Docker image with the latest development version of Nix
To get the latest image that was built by [Hydra](https://hydra.nixos.org) run
the following command:
```console
$ curl -L https://hydra.nixos.org/job/nix/master/dockerImage.x86_64-linux/latest/download/1 | docker load
$ docker run -ti nix:2.5pre20211105
```
You can also build a Docker image from source yourself:
```console
$ nix build ./\#hydraJobs.dockerImage.x86_64-linux
$ docker load -i ./result
$ docker run -ti nix:2.5pre20211105
```

View file

@ -1,4 +1,4 @@
# Installing Nix from Source # Installing Nix from Source
If no binary package is available, you can download and compile a source If no binary package is available or if you want to hack on Nix, you
distribution. can build Nix from its Git repository.

View file

@ -1,14 +1,9 @@
# Obtaining a Source Distribution # Obtaining the Source
The source tarball of the most recent stable release can be downloaded The most recent sources of Nix can be obtained from its [Git
from the [Nix homepage](http://nixos.org/nix/download.html). You can repository](https://github.com/NixOS/nix). For example, the following
also grab the [most recent development command will check out the latest revision into a directory called
release](http://hydra.nixos.org/job/nix/master/release/latest-finished#tabs-constituents). `nix`:
Alternatively, the most recent sources of Nix can be obtained from its
[Git repository](https://github.com/NixOS/nix). For example, the
following command will check out the latest revision into a directory
called `nix`:
```console ```console
$ git clone https://github.com/NixOS/nix $ git clone https://github.com/NixOS/nix

View file

@ -2,9 +2,8 @@
- GNU Autoconf (<https://www.gnu.org/software/autoconf/>) and the - GNU Autoconf (<https://www.gnu.org/software/autoconf/>) and the
autoconf-archive macro collection autoconf-archive macro collection
(<https://www.gnu.org/software/autoconf-archive/>). These are only (<https://www.gnu.org/software/autoconf-archive/>). These are
needed to run the bootstrap script, and are not necessary if your needed to run the bootstrap script.
source distribution came with a pre-built `./configure` script.
- GNU Make. - GNU Make.
@ -26,15 +25,6 @@
available for download from the official repository available for download from the official repository
<https://github.com/google/brotli>. <https://github.com/google/brotli>.
- The bzip2 compressor program and the `libbz2` library. Thus you must
have bzip2 installed, including development headers and libraries.
If your distribution does not provide these, you can obtain bzip2
from
<https://sourceware.org/bzip2/>.
- `liblzma`, which is provided by XZ Utils. If your distribution does
not provide this, you can get it from <https://tukaani.org/xz/>.
- cURL and its library. If your distribution does not provide it, you - cURL and its library. If your distribution does not provide it, you
can get it from <https://curl.haxx.se/>. can get it from <https://curl.haxx.se/>.
@ -61,8 +51,7 @@
you need version 2.5.35, which is available on you need version 2.5.35, which is available on
[SourceForge](http://lex.sourceforge.net/). Slightly older versions [SourceForge](http://lex.sourceforge.net/). Slightly older versions
may also work, but ancient versions like the ubiquitous 2.5.4a may also work, but ancient versions like the ubiquitous 2.5.4a
won't. Note that these are only required if you modify the parser or won't.
when you are building from the Git repository.
- The `libseccomp` is used to provide syscall filtering on Linux. This - The `libseccomp` is used to provide syscall filtering on Linux. This
is an optional dependency and can be disabled passing a is an optional dependency and can be disabled passing a

View file

@ -76,7 +76,7 @@ there after an upgrade. This means that you can _roll back_ to the
old version: old version:
```console ```console
$ nix-env --upgrade some-packages $ nix-env --upgrade -A nixpkgs.some-package
$ nix-env --rollback $ nix-env --rollback
``` ```
@ -122,12 +122,12 @@ Nix expressions generally describe how to build a package from
source, so an installation action like source, so an installation action like
```console ```console
$ nix-env --install firefox $ nix-env --install -A nixpkgs.firefox
``` ```
_could_ cause quite a bit of build activity, as not only Firefox but _could_ cause quite a bit of build activity, as not only Firefox but
also all its dependencies (all the way up to the C library and the also all its dependencies (all the way up to the C library and the
compiler) would have to built, at least if they are not already in the compiler) would have to be built, at least if they are not already in the
Nix store. This is a _source deployment model_. For most users, Nix store. This is a _source deployment model_. For most users,
building from source is not very pleasant as it takes far too long. building from source is not very pleasant as it takes far too long.
However, Nix can automatically skip building from source and instead However, Nix can automatically skip building from source and instead

View file

@ -24,7 +24,7 @@ collection; you could write your own Nix expressions based on Nixpkgs,
or completely new ones.) or completely new ones.)
You can manually download the latest version of Nixpkgs from You can manually download the latest version of Nixpkgs from
<http://nixos.org/nixpkgs/download.html>. However, it’s much more <https://github.com/NixOS/nixpkgs>. However, it’s much more
convenient to use the Nixpkgs [*channel*](channels.md), since it makes convenient to use the Nixpkgs [*channel*](channels.md), since it makes
it easy to stay up to date with new versions of Nixpkgs. Nixpkgs is it easy to stay up to date with new versions of Nixpkgs. Nixpkgs is
automatically added to your list of “subscribed” channels when you automatically added to your list of “subscribed” channels when you
@ -47,41 +47,45 @@ $ nix-channel --update
You can view the set of available packages in Nixpkgs: You can view the set of available packages in Nixpkgs:
```console ```console
$ nix-env -qa $ nix-env -qaP
aterm-2.2 nixpkgs.aterm aterm-2.2
bash-3.0 nixpkgs.bash bash-3.0
binutils-2.15 nixpkgs.binutils binutils-2.15
bison-1.875d nixpkgs.bison bison-1.875d
blackdown-1.4.2 nixpkgs.blackdown blackdown-1.4.2
bzip2-1.0.2 nixpkgs.bzip2 bzip2-1.0.2
``` ```
The flag `-q` specifies a query operation, and `-a` means that you want The flag `-q` specifies a query operation, `-a` means that you want
to show the “available” (i.e., installable) packages, as opposed to the to show the “available” (i.e., installable) packages, as opposed to the
installed packages. If you downloaded Nixpkgs yourself, or if you installed packages, and `-P` prints the attribute paths that can be used
checked it out from GitHub, then you need to pass the path to your to unambiguously select a package for installation (listed in the first column).
Nixpkgs tree using the `-f` flag: If you downloaded Nixpkgs yourself, or if you checked it out from GitHub,
then you need to pass the path to your Nixpkgs tree using the `-f` flag:
```console ```console
$ nix-env -qaf /path/to/nixpkgs $ nix-env -qaPf /path/to/nixpkgs
aterm aterm-2.2
bash bash-3.0
``` ```
where */path/to/nixpkgs* is where you’ve unpacked or checked out where */path/to/nixpkgs* is where you’ve unpacked or checked out
Nixpkgs. Nixpkgs.
You can select specific packages by name: You can filter the packages by name:
```console ```console
$ nix-env -qa firefox $ nix-env -qaP firefox
firefox-34.0.5 nixpkgs.firefox-esr firefox-91.3.0esr
firefox-with-plugins-34.0.5 nixpkgs.firefox firefox-94.0.1
``` ```
and using regular expressions: and using regular expressions:
```console ```console
$ nix-env -qa 'firefox.*' $ nix-env -qaP 'firefox.*'
``` ```
It is also possible to see the *status* of available packages, i.e., It is also possible to see the *status* of available packages, i.e.,
@ -89,11 +93,11 @@ whether they are installed into the user environment and/or present in
the system: the system:
```console ```console
$ nix-env -qas $ nix-env -qaPs
-PS bash-3.0 -PS nixpkgs.bash bash-3.0
--S binutils-2.15 --S nixpkgs.binutils binutils-2.15
IPS bison-1.875d IPS nixpkgs.bison bison-1.875d
``` ```
@ -106,13 +110,13 @@ which is Nix’s mechanism for doing binary deployment. It just means that which is Nix’s mechanism for doing binary deployment. It just means that
Nix knows that it can fetch a pre-built package from somewhere Nix knows that it can fetch a pre-built package from somewhere
(typically a network server) instead of building it locally. (typically a network server) instead of building it locally.
You can install a package using `nix-env -i`. For instance, You can install a package using `nix-env -iA`. For instance,
```console ```console
$ nix-env -i subversion $ nix-env -iA nixpkgs.subversion
``` ```
will install the package called `subversion` (which is, of course, the will install the package called `subversion` from the `nixpkgs` channel (which is, of course, the
[Subversion version management system](http://subversion.tigris.org/)). [Subversion version management system](http://subversion.tigris.org/)).
> **Note** > **Note**
@ -122,7 +126,7 @@ will install the package called `subversion` (which is, of course, the
> binary cache <https://cache.nixos.org>; it contains binaries for most > binary cache <https://cache.nixos.org>; it contains binaries for most
> packages in Nixpkgs. Only if no binary is available in the binary > packages in Nixpkgs. Only if no binary is available in the binary
> cache, Nix will build the package from source. So if `nix-env > cache, Nix will build the package from source. So if `nix-env
> -i subversion` results in Nix building stuff from source, then either > -iA nixpkgs.subversion` results in Nix building stuff from source, then either
> the package is not built for your platform by the Nixpkgs build > the package is not built for your platform by the Nixpkgs build
> servers, or your version of Nixpkgs is too old or too new. For > servers, or your version of Nixpkgs is too old or too new. For
> instance, if you have a very recent checkout of Nixpkgs, then the > instance, if you have a very recent checkout of Nixpkgs, then the
@ -133,7 +137,10 @@ will install the package called `subversion` (which is, of course, the
> using a Git checkout of the Nixpkgs tree), you will get binaries for > using a Git checkout of the Nixpkgs tree), you will get binaries for
> most packages. > most packages.
Naturally, packages can also be uninstalled: Naturally, packages can also be uninstalled. Unlike when installing, you will
need to use the derivation name (though the version part can be omitted),
instead of the attribute path, as `nix-env` does not record which attribute
was used for installing:
```console ```console
$ nix-env -e subversion $ nix-env -e subversion
@ -143,7 +150,7 @@ Upgrading to a new version is just as easy. If you have a new release of
Nix Packages, you can do: Nix Packages, you can do:
```console ```console
$ nix-env -u subversion $ nix-env -uA nixpkgs.subversion
``` ```
This will *only* upgrade Subversion if there is a “newer” version in the This will *only* upgrade Subversion if there is a “newer” version in the

View file

@ -9,7 +9,7 @@ The daemon that handles binary cache requests via HTTP, `nix-serve`, is
not part of the Nix distribution, but you can install it from Nixpkgs: not part of the Nix distribution, but you can install it from Nixpkgs:
```console ```console
$ nix-env -i nix-serve $ nix-env -iA nixpkgs.nix-serve
``` ```
You can then start the server, listening for HTTP connections on You can then start the server, listening for HTTP connections on
@ -35,7 +35,7 @@ On the client side, you can tell Nix to use your binary cache using
`--option extra-binary-caches`, e.g.: `--option extra-binary-caches`, e.g.:
```console ```console
$ nix-env -i firefox --option extra-binary-caches http://avalon:8080/ $ nix-env -iA nixpkgs.firefox --option extra-binary-caches http://avalon:8080/
``` ```
The option `extra-binary-caches` tells Nix to use this binary cache in The option `extra-binary-caches` tells Nix to use this binary cache in

View file

@ -44,7 +44,7 @@ collector as follows:
$ nix-store --gc $ nix-store --gc
``` ```
The behaviour of the gargage collector is affected by the The behaviour of the garbage collector is affected by the
`keep-derivations` (default: true) and `keep-outputs` (default: false) `keep-derivations` (default: true) and `keep-outputs` (default: false)
options in the Nix configuration file. The defaults will ensure that all options in the Nix configuration file. The defaults will ensure that all
derivations that are build-time dependencies of garbage collector roots derivations that are build-time dependencies of garbage collector roots

View file

@ -39,7 +39,7 @@ just Subversion 1.1.2 (arrows in the figure indicate symlinks). This
would be what we would obtain if we had done would be what we would obtain if we had done
```console ```console
$ nix-env -i subversion $ nix-env -iA nixpkgs.subversion
``` ```
on a set of Nix expressions that contained Subversion 1.1.2. on a set of Nix expressions that contained Subversion 1.1.2.
@ -54,7 +54,7 @@ environment is generated based on the current one. For instance,
generation 43 was created from generation 42 when we did generation 43 was created from generation 42 when we did
```console ```console
$ nix-env -i subversion firefox $ nix-env -iA nixpkgs.subversion nixpkgs.firefox
``` ```
on a set of Nix expressions that contained Firefox and a new version of on a set of Nix expressions that contained Firefox and a new version of
@ -127,7 +127,7 @@ All `nix-env` operations work on the profile pointed to by
(abbreviation `-p`): (abbreviation `-p`):
```console ```console
$ nix-env -p /nix/var/nix/profiles/other-profile -i subversion $ nix-env -p /nix/var/nix/profiles/other-profile -iA nixpkgs.subversion
``` ```
This will *not* change the `~/.nix-profile` symlink. This will *not* change the `~/.nix-profile` symlink.

View file

@ -6,7 +6,7 @@ automatically fetching any store paths in Firefoxs closure if they are
available on the server `avalon`: available on the server `avalon`:
```console ```console
$ nix-env -i firefox --substituters ssh://alice@avalon $ nix-env -iA nixpkgs.firefox --substituters ssh://alice@avalon
``` ```
This works similar to the binary cache substituter that Nix usually This works similar to the binary cache substituter that Nix usually

View file

@ -19,19 +19,19 @@ to subsequent chapters.
channel: channel:
```console ```console
$ nix-env -qa $ nix-env -qaP
docbook-xml-4.3 nixpkgs.docbook_xml_dtd_43 docbook-xml-4.3
docbook-xml-4.5 nixpkgs.docbook_xml_dtd_45 docbook-xml-4.5
firefox-33.0.2 nixpkgs.firefox firefox-33.0.2
hello-2.9 nixpkgs.hello hello-2.9
libxslt-1.1.28 nixpkgs.libxslt libxslt-1.1.28
``` ```
1. Install some packages from the channel: 1. Install some packages from the channel:
```console ```console
$ nix-env -i hello $ nix-env -iA nixpkgs.hello
``` ```
This should download pre-built packages; it should not build them This should download pre-built packages; it should not build them

View file

@ -1,8 +1,539 @@
# Release 2.4 (202X-XX-XX) # Release 2.4 (2021-11-01)
- It is now an error to modify the `plugin-files` setting via a This is the first release in more than two years and is the result of
command-line flag that appears after the first non-flag argument more than 2800 commits from 195 contributors since release 2.3.
to any command, including a subcommand to `nix`. For example,
`nix-instantiate default.nix --plugin-files ""` must now become ## Highlights
`nix-instantiate --plugin-files "" default.nix`.
- Plugins that add new `nix` subcommands are now actually respected. * Nix's **error messages** have been improved a lot. For instance,
evaluation errors now point out the location of the error:
```
$ nix build
error: undefined variable 'bzip3'
at /nix/store/449lv242z0zsgwv95a8124xi11sp419f-source/flake.nix:88:13:
87| [ curl
88| bzip3 xz brotli editline
| ^
89| openssl sqlite
```
* The **`nix` command** has seen a lot of work and is now almost at
feature parity with the old command-line interface (the `nix-*`
commands). It aims to be [more modern, consistent and pleasant to
use](../contributing/cli-guideline.md) than the old CLI. It is still
marked as experimental but its interface should not change much
anymore in future releases.
* **Flakes** are a new format to package Nix-based projects in a more
discoverable, composable, consistent and reproducible way. A flake
is just a repository or tarball containing a file named `flake.nix`
that specifies dependencies on other flakes and returns any Nix
assets such as packages, Nixpkgs overlays, NixOS modules or CI
tests. The new `nix` CLI is primarily based around flakes; for
example, a command like `nix run nixpkgs#hello` runs the `hello`
application from the `nixpkgs` flake.
Flakes are currently marked as experimental. For an introduction,
see [this blog
post](https://www.tweag.io/blog/2020-05-25-flakes/). For detailed
information about flake syntax and semantics, see the [`nix flake`
manual page](../command-ref/new-cli/nix3-flake.md). A minimal example `flake.nix` is sketched at the end of this list.
* Nix's store can now be **content-addressed**, meaning that the hash
component of a store path is the hash of the path's
contents. Previously Nix could only build **input-addressed** store
paths, where the hash is computed from the derivation dependency
graph. Content-addressing allows deduplication, early cutoff in
build systems, and unprivileged closure copying. This is still [an
experimental
feature](https://discourse.nixos.org/t/content-addressed-nix-call-for-testers/12881).
* The Nix manual has been converted into Markdown, making it easier to
contribute. In addition, every `nix` subcommand now has a manual
page, documenting every option.
* A new setting that allows **experimental features** to be enabled
selectively. This allows us to merge unstable features into Nix more
quickly and do more frequent releases.
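As a rough sketch of the flake format described above, a minimal `flake.nix` could look like the following; the chosen nixpkgs branch and the `hello` package are illustrative assumptions, not part of the release notes:

```nix
{
  description = "A minimal example flake";

  # Depend on the nixpkgs flake (branch chosen for illustration).
  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-21.05";

  outputs = { self, nixpkgs }: {
    # Re-export an existing Nixpkgs package, so that
    # `nix build .#hello` works from this repository.
    packages.x86_64-linux.hello =
      nixpkgs.legacyPackages.x86_64-linux.hello;
  };
}
```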
## Other features
* There are many new `nix` subcommands:
- `nix develop` is intended to replace `nix-shell`. It has a number
of new features:
* It automatically sets the output environment variables (such as
`$out`) to writable locations (such as `./outputs/out`).
* It can store the environment in a profile. This is useful for
offline work.
* It can run specific phases directly. For instance, `nix develop
--build` runs `buildPhase`.
- It allows dependencies in the Nix store to be "redirected" to
arbitrary directories using the `--redirect` flag. This is
useful if you want to hack on a package *and* some of its
dependencies at the same time.
- `nix print-dev-env` prints the environment variables and bash
functions defined by a derivation. This is useful for users of
other shells than bash (especially with `--json`).
- `nix shell` was previously named `nix run` and is intended to
replace `nix-shell -p`, but without the `stdenv` overhead. It
simply starts a shell where some packages have been added to
`$PATH`.
- `nix run` (not to be confused with the old subcommand that has
been renamed to `nix shell`) runs an "app", a flake output that
specifies a command to run, or an eponymous program from a
package. For example, `nix run nixpkgs#hello` runs the `hello`
program from the `hello` package in `nixpkgs`.
- `nix flake` is the container for flake-related operations, such as
creating a new flake, querying the contents of a flake or updating
flake lock files.
- `nix registry` allows you to query and update the flake registry,
which maps identifiers such as `nixpkgs` to concrete flake URLs.
- `nix profile` is intended to replace `nix-env`. Its main advantage
is that it keeps track of the provenance of installed packages
(e.g. exactly which flake version a package came from). It also
has some helpful subcommands:
* `nix profile history` shows what packages were added, upgraded
or removed between each version of a profile.
* `nix profile diff-closures` shows the changes between the
closures of each version of a profile. This allows you to
discover the addition or removal of dependencies or size
changes.
**Warning**: after a profile has been updated using `nix profile`,
it is no longer usable with `nix-env`.
- `nix store diff-closures` shows the differences between the
closures of two store paths in terms of the versions and sizes of
dependencies in the closures.
- `nix store make-content-addressable` rewrites an arbitrary closure
to make it content-addressed. Such paths can be copied into other
stores without requiring signatures.
- `nix bundle` uses the [`nix-bundle`
program](https://github.com/matthewbauer/nix-bundle) to convert a
closure into a self-extracting executable.
- Various other replacements for the old CLI, e.g. `nix store gc`,
`nix store delete`, `nix store repair`, `nix nar dump-path`, `nix
store prefetch-file`, `nix store prefetch-tarball`, `nix key` and
`nix daemon`.
* Nix now has an **evaluation cache** for flake outputs. For example,
a second invocation of the command `nix run nixpkgs#firefox` will
not need to evaluate the `firefox` attribute because it's already in
the evaluation cache. This is made possible by the hermetic
evaluation model of flakes.
* The new `--offline` flag disables substituters and causes all
locally cached tarballs and repositories to be considered
up-to-date.
* The new `--refresh` flag causes all locally cached tarballs and
repositories to be considered out-of-date.
* Many `nix` subcommands now have a `--json` option to produce
machine-readable output.
* `nix repl` has a new `:doc` command to show documentation about
builtin functions (e.g. `:doc builtins.map`).
* Binary cache stores now have an option `index-debug-info` to create
an index of DWARF debuginfo files for use by
[`dwarffs`](https://github.com/edolstra/dwarffs).
* To support flakes, Nix now has an extensible mechanism for fetching
source trees. Currently it has the following backends:
* Git repositories
* Mercurial repositories
* GitHub and GitLab repositories (an optimisation for faster
fetching than Git)
* Tarballs
* Arbitrary directories
The fetcher infrastructure is exposed via flake input specifications
and via the `fetchTree` built-in.
* **Languages changes**: the only new language feature is that you can
now have antiquotations in paths, e.g. `./${foo}` instead of `./. +
foo`.
* **New built-in functions**:
- `builtins.fetchTree` allows fetching a source tree using any
backends supported by the fetcher infrastructure. It subsumes the
functionality of existing built-ins like `fetchGit`,
`fetchMercurial` and `fetchTarball`.
- `builtins.getFlake` fetches a flake and returns its output
attributes. This function should not be used inside flakes! Use
flake inputs instead.
- `builtins.floor` and `builtins.ceil` round a floating-point number
down and up, respectively (see the sketch at the end of this list).
* Experimental support for recursive Nix. This means that Nix
derivations can now call Nix to build other derivations. This is not
in a stable state yet and not well
[documented](https://github.com/NixOS/nix/commit/c4d7c76b641d82b2696fef73ce0ac160043c18da).
* The new experimental feature `no-url-literals` disables URL
literals. This helps to implement [RFC
45](https://github.com/NixOS/rfcs/pull/45).
* Nix now uses `libarchive` to decompress and unpack tarballs and zip
files, so `tar` is no longer required.
* The priority of substituters can now be overridden using the
`priority` substituter setting (e.g. `--substituters
'http://cache.nixos.org?priority=100 daemon?priority=10'`).
* `nix edit` now supports non-derivation attributes, e.g. `nix edit
.#nixosConfigurations.bla`.
* The `nix` command now provides command line completion for `bash`,
`zsh` and `fish`. Since the support for getting completions is built
into `nix`, it's easy to add support for other shells.
* The new `--log-format` flag selects what Nix's output looks like. It
defaults to a terse progress indicator. There is a new
`internal-json` output format for use by other programs.
* `nix eval` has a new `--apply` flag that applies a function to the
evaluation result.
* `nix eval` has a new `--write-to` flag that allows it to write a
nested attribute set of string leaves to a corresponding directory
tree.
* Memory improvements: many operations that add paths to the store or
copy paths between stores now run in constant memory.
* Many `nix` commands now support the flag `--derivation` to operate
on a `.drv` file itself instead of its outputs.
* There is a new store called `dummy://` that does not support
building or adding paths. This is useful if you want to use the Nix
evaluator but don't have a Nix store.
* The `ssh-ng://` store now allows substituting paths on the remote,
as `ssh://` already did.
* When auto-calling a function with an ellipsis, all arguments are now
passed.
* New `nix-shell` features:
- It preserves the `PS1` environment variable if
`NIX_SHELL_PRESERVE_PROMPT` is set.
- With `-p`, it passes any `--arg`s as Nixpkgs arguments.
- Support for structured attributes.
* `nix-prefetch-url` has a new `--executable` flag.
* On `x86_64` systems, [`x86_64` microarchitecture
levels](https://lwn.net/Articles/844831/) are mapped to additional
system types (e.g. `x86_64-v1-linux`).
* The new `--eval-store` flag allows you to use a different store for
evaluation than for building or storing the build result. This is
primarily useful when you want to query whether something exists in
a read-only store, such as a binary cache:
```
# nix path-info --json --store https://cache.nixos.org \
--eval-store auto nixpkgs#hello
```
(Here `auto` indicates the local store.)
* The Nix daemon has a new low-latency mechanism for copying
closures. This is useful when building on remote stores such as
`ssh-ng://`.
* Plugins can now register `nix` subcommands.
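As a small sketch of the new rounding built-ins listed above:

```console
$ nix repl
nix-repl> builtins.floor 2.7
2

nix-repl> builtins.ceil 2.1
3
```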
## Incompatible changes
* The `nix` command is now marked as an experimental feature. This
means that you need to add
```
experimental-features = nix-command
```
to your `nix.conf` if you want to use it, or pass
`--extra-experimental-features nix-command` on the command line.
* The `nix` command no longer has a syntax for referring to packages
in a channel. This means that the following no longer works:
```console
nix build nixpkgs.hello # Nix 2.3
```
Instead, you can either use the `#` syntax to select a package from
a flake, e.g.
```console
nix build nixpkgs#hello
```
Or, if you want to use the `nixpkgs` channel in the `NIX_PATH`
environment variable:
```console
nix build -f '<nixpkgs>' hello
```
* The old `nix run` has been renamed to `nix shell`, while there is a
new `nix run` that runs a default command. So instead of
```console
nix run nixpkgs.hello -c hello # Nix 2.3
```
you should use
```console
nix shell nixpkgs#hello -c hello
```
or just
```console
nix run nixpkgs#hello
```
if the command you want to run has the same name as the package.
* It is now an error to modify the `plugin-files` setting via a
command-line flag that appears after the first non-flag argument to
any command, including a subcommand to `nix`. For example,
`nix-instantiate default.nix --plugin-files ""` must now become
`nix-instantiate --plugin-files "" default.nix`.
* We no longer release source tarballs. If you want to build from
source, please build from the tags in the Git repository.
## Contributors
This release has contributions from
Adam Höse,
Albert Safin,
Alex Kovar,
Alex Zero,
Alexander Bantyev,
Alexandre Esteves,
Alyssa Ross,
Anatole Lucet,
Anders Kaseorg,
Andreas Rammhold,
Antoine Eiche,
Antoine Martin,
Arnout Engelen,
Arthur Gautier,
aszlig,
Ben Burdette,
Benjamin Hipple,
Bernardo Meurer,
Björn Gohla,
Bjørn Forsman,
Bob van der Linden,
Brian Leung,
Brian McKenna,
Brian Wignall,
Bruce Toll,
Bryan Richter,
Calle Rosenquist,
Calvin Loncaric,
Carlo Nucera,
Carlos D'Agostino,
Chaz Schlarp,
Christian Höppner,
Christian Kampka,
Chua Hou,
Chuck,
Cole Helbling,
Daiderd Jordan,
Dan Callahan,
Dani,
Daniel Fitzpatrick,
Danila Fedorin,
Daniël de Kok,
Danny Bautista,
DavHau,
David McFarland,
Dima,
Domen Kožar,
Dominik Schrempf,
Dominique Martinet,
dramforever,
Dustin DeWeese,
edef,
Eelco Dolstra,
Emilio Karakey,
Emily,
Eric Culp,
Ersin Akinci,
Fabian Möller,
Farid Zakaria,
Federico Pellegrin,
Finn Behrens,
Florian Franzen,
Félix Baylac-Jacqué,
Gabriel Gonzalez,
Geoff Reedy,
Georges Dubus,
Graham Christensen,
Greg Hale,
Greg Price,
Gregor Kleen,
Gregory Hale,
Griffin Smith,
Guillaume Bouchard,
Harald van Dijk,
illustris,
Ivan Zvonimir Horvat,
Jade,
Jake Waksbaum,
jakobrs,
James Ottaway,
Jan Tojnar,
Janne Heß,
Jaroslavas Pocepko,
Jarrett Keifer,
Jeremy Schlatter,
Joachim Breitner,
Joe Hermaszewski,
Joe Pea,
John Ericson,
Jonathan Ringer,
Josef Kemetmüller,
Joseph Lucas,
Jude Taylor,
Julian Stecklina,
Julien Tanguy,
Jörg Thalheim,
Kai Wohlfahrt,
keke,
Keshav Kini,
Kevin Quick,
Kevin Stock,
Kjetil Orbekk,
Krzysztof Gogolewski,
kvtb,
Lars Mühmel,
Leonhard Markert,
Lily Ballard,
Linus Heckemann,
Lorenzo Manacorda,
Lucas Desgouilles,
Lucas Franceschino,
Lucas Hoffmann,
Luke Granger-Brown,
Madeline Haraj,
Marwan Aljubeh,
Mat Marini,
Mateusz Piotrowski,
Matthew Bauer,
Matthew Kenigsberg,
Mauricio Scheffer,
Maximilian Bosch,
Michael Adler,
Michael Bishop,
Michael Fellinger,
Michael Forney,
Michael Reilly,
mlatus,
Mykola Orliuk,
Nathan van Doorn,
Naïm Favier,
ng0,
Nick Van den Broeck,
Nicolas Stig124 Formichella,
Niels Egberts,
Niklas Hambüchen,
Nikola Knezevic,
oxalica,
p01arst0rm,
Pamplemousse,
Patrick Hilhorst,
Paul Opiyo,
Pavol Rusnak,
Peter Kolloch,
Philipp Bartsch,
Philipp Middendorf,
Piotr Szubiakowski,
Profpatsch,
Puck Meerburg,
Ricardo M. Correia,
Rickard Nilsson,
Robert Hensing,
Robin Gloster,
Rodrigo,
Rok Garbas,
Ronnie Ebrin,
Rovanion Luckey,
Ryan Burns,
Ryan Mulligan,
Ryne Everett,
Sam Doshi,
Sam Lidder,
Samir Talwar,
Samuel Dionne-Riel,
Sebastian Ullrich,
Sergei Trofimovich,
Sevan Janiyan,
Shao Cheng,
Shea Levy,
Silvan Mosberger,
Stefan Frijters,
Stefan Jaax,
sternenseemann,
Steven Shaw,
Stéphan Kochen,
SuperSandro2000,
Suraj Barkale,
Taeer Bar-Yam,
Thomas Churchman,
Théophane Hufschmitt,
Timothy DeHerrera,
Timothy Klim,
Tobias Möst,
Tobias Pflug,
Tom Bereknyei,
Travis A. Everett,
Ujjwal Jain,
Vladimír Čunát,
Wil Taylor,
Will Dietz,
Yaroslav Bolyukin,
Yestin L. Harrison,
YI,
Yorick van Pelt,
Yuriy Taraday and
zimbatm.

View file

@ -0,0 +1,7 @@
# Release 2.5 (2021-XX-XX)
* Binary cache stores now have a setting `compression-level`.
* `nix develop` now has a flag `--unpack` to run `unpackPhase`.
* Lists can now be compared lexicographically using the `<` operator.
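A brief `nix repl` sketch of the new list comparison; elements are compared from left to right, and a list that is a strict prefix of another compares as smaller:

```console
$ nix repl
nix-repl> [ 1 2 ] < [ 1 3 ]
true

nix-repl> [ "a" "b" ] < [ "a" "b" "c" ]
true
```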

251
docker.nix Normal file
View file

@ -0,0 +1,251 @@
{ pkgs ? import <nixpkgs> { }
, lib ? pkgs.lib
, name ? "nix"
, tag ? "latest"
, channelName ? "nixpkgs"
, channelURL ? "https://nixos.org/channels/nixpkgs-unstable"
}:
let
defaultPkgs = with pkgs; [
nix
bashInteractive
coreutils-full
gnutar
gzip
gnugrep
which
curl
less
wget
man
cacert.out
findutils
];
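# Users and groups baked into the image's /etc/passwd, /etc/group and
# /etc/shadow below: root plus 32 nixbld build users.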
users = {
root = {
uid = 0;
shell = "/bin/bash";
home = "/root";
gid = 0;
};
} // lib.listToAttrs (
map
(
n: {
name = "nixbld${toString n}";
value = {
uid = 30000 + n;
gid = 30000;
groups = [ "nixbld" ];
description = "Nix build user ${toString n}";
};
}
)
(lib.lists.range 1 32)
);
groups = {
root.gid = 0;
nixbld.gid = 30000;
};
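# Render one user attribute set as a line in /etc/passwd format.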
userToPasswd = (
k:
{ uid
, gid ? 65534
, home ? "/var/empty"
, description ? ""
, shell ? "/bin/false"
, groups ? [ ]
}: "${k}:x:${toString uid}:${toString gid}:${description}:${home}:${shell}"
);
passwdContents = (
lib.concatStringsSep "\n"
(lib.attrValues (lib.mapAttrs userToPasswd users))
);
userToShadow = k: { ... }: "${k}:!:1::::::";
shadowContents = (
lib.concatStringsSep "\n"
(lib.attrValues (lib.mapAttrs userToShadow users))
);
# Map groups to members
# {
# group = [ "user1" "user2" ];
# }
groupMemberMap = (
let
# Create a flat list of user/group mappings
mappings = (
builtins.foldl'
(
acc: user:
let
groups = users.${user}.groups or [ ];
in
acc ++ map
(group: {
inherit user group;
})
groups
)
[ ]
(lib.attrNames users)
);
in
(
builtins.foldl'
(
acc: v: acc // {
${v.group} = acc.${v.group} or [ ] ++ [ v.user ];
}
)
{ }
mappings)
);
groupToGroup = k: { gid }:
let
members = groupMemberMap.${k} or [ ];
in
"${k}:x:${toString gid}:${lib.concatStringsSep "," members}";
groupContents = (
lib.concatStringsSep "\n"
(lib.attrValues (lib.mapAttrs groupToGroup groups))
);
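# Settings for the image's /etc/nix/nix.conf (serialized as nixConfContents below).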
nixConf = {
sandbox = "false";
build-users-group = "nixbld";
trusted-public-keys = "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=";
};
nixConfContents = (lib.concatStringsSep "\n" (lib.mapAttrsFlatten (n: v: "${n} = ${v}") nixConf)) + "\n";
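# Skeleton filesystem for the image: passwd/group/shadow files, nix.conf,
# a default profile containing defaultPkgs, and a nixpkgs channel for root.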
baseSystem =
let
nixpkgs = pkgs.path;
channel = pkgs.runCommand "channel-nixos" { } ''
mkdir $out
ln -s ${nixpkgs} $out/nixpkgs
echo "[]" > $out/manifest.nix
'';
rootEnv = pkgs.buildPackages.buildEnv {
name = "root-profile-env";
paths = defaultPkgs;
};
profile = pkgs.buildPackages.runCommand "user-environment" { } ''
mkdir $out
cp -a ${rootEnv}/* $out/
cat > $out/manifest.nix <<EOF
[
${lib.concatStringsSep "\n" (builtins.map (drv: let
outputs = drv.outputsToInstall or [ "out" ];
in ''
{
${lib.concatStringsSep "\n" (builtins.map (output: ''
${output} = { outPath = "${lib.getOutput output drv}"; };
'') outputs)}
outputs = [ ${lib.concatStringsSep " " (builtins.map (x: "\"${x}\"") outputs)} ];
name = "${drv.name}";
outPath = "${drv}";
system = "${drv.system}";
type = "derivation";
meta = { };
}
'') defaultPkgs)}
]
EOF
'';
in
pkgs.runCommand "base-system"
{
inherit passwdContents groupContents shadowContents nixConfContents;
passAsFile = [
"passwdContents"
"groupContents"
"shadowContents"
"nixConfContents"
];
allowSubstitutes = false;
preferLocalBuild = true;
} ''
env
set -x
mkdir -p $out/etc
cat $passwdContentsPath > $out/etc/passwd
echo "" >> $out/etc/passwd
cat $groupContentsPath > $out/etc/group
echo "" >> $out/etc/group
cat $shadowContentsPath > $out/etc/shadow
echo "" >> $out/etc/shadow
mkdir -p $out/usr
ln -s /nix/var/nix/profiles/share $out/usr/
mkdir -p $out/nix/var/nix/gcroots
mkdir $out/tmp
mkdir -p $out/etc/nix
cat $nixConfContentsPath > $out/etc/nix/nix.conf
mkdir -p $out/root
mkdir -p $out/nix/var/nix/profiles/per-user/root
ln -s ${profile} $out/nix/var/nix/profiles/default-1-link
ln -s $out/nix/var/nix/profiles/default-1-link $out/nix/var/nix/profiles/default
ln -s /nix/var/nix/profiles/default $out/root/.nix-profile
ln -s ${channel} $out/nix/var/nix/profiles/per-user/root/channels-1-link
ln -s $out/nix/var/nix/profiles/per-user/root/channels-1-link $out/nix/var/nix/profiles/per-user/root/channels
mkdir -p $out/root/.nix-defexpr
ln -s $out/nix/var/nix/profiles/per-user/root/channels $out/root/.nix-defexpr/channels
echo "${channelURL} ${channelName}" > $out/root/.nix-channels
mkdir -p $out/bin $out/usr/bin
ln -s ${pkgs.coreutils}/bin/env $out/usr/bin/env
ln -s ${pkgs.bashInteractive}/bin/bash $out/bin/sh
'';
in
pkgs.dockerTools.buildLayeredImageWithNixDb {
inherit name tag;
contents = [ baseSystem ];
extraCommands = ''
rm -rf nix-support
ln -s /nix/var/nix/profiles nix/var/nix/gcroots/profiles
'';
config = {
Cmd = [ "/root/.nix-profile/bin/bash" ];
Env = [
"USER=root"
"PATH=${lib.concatStringsSep ":" [
"/root/.nix-profile/bin"
"/nix/var/nix/profiles/default/bin"
"/nix/var/nix/profiles/default/sbin"
]}"
"MANPATH=${lib.concatStringsSep ":" [
"/root/.nix-profile/share/man"
"/nix/var/nix/profiles/default/share/man"
]}"
"SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
"GIT_SSL_CAINFO=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
"NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
"NIX_PATH=/nix/var/nix/profiles/per-user/root/channels:/root/.nix-defexpr/channels"
];
};
}
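A sketch of building and loading the image defined above locally (assuming `<nixpkgs>` is available via `NIX_PATH`, per the default arguments; the image name and tag come from the defaults at the top of the file):

```console
$ nix-build docker.nix
$ docker load -i ./result
$ docker run -ti nix:latest
```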

View file

@ -3,32 +3,31 @@
"lowdown-src": { "lowdown-src": {
"flake": false, "flake": false,
"locked": { "locked": {
"lastModified": 1617481909, "lastModified": 1633514407,
"narHash": "sha256-SqnfOFuLuVRRNeVJr1yeEPJue/qWoCp5N6o5Kr///p4=", "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
"owner": "kristapsdz", "owner": "kristapsdz",
"repo": "lowdown", "repo": "lowdown",
"rev": "148f9b2f586c41b7e36e73009db43ea68c7a1a4d", "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "kristapsdz", "owner": "kristapsdz",
"ref": "VERSION_0_8_4",
"repo": "lowdown", "repo": "lowdown",
"type": "github" "type": "github"
} }
}, },
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1614309161, "lastModified": 1632864508,
"narHash": "sha256-93kRxDPyEW9QIpxU71kCaV1r+hgOgP6/aVgC7vvO8IU=", "narHash": "sha256-d127FIvGR41XbVRDPVvozUPQ/uRHbHwvfyKHwEt5xFM=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "0e499fde7af3c28d63e9b13636716b86c3162b93", "rev": "82891b5e2c2359d7e58d08849e4c89511ab94234",
"type": "github" "type": "github"
}, },
"original": { "original": {
"id": "nixpkgs", "id": "nixpkgs",
"ref": "nixos-20.09-small", "ref": "nixos-21.05-small",
"type": "indirect" "type": "indirect"
} }
}, },

337
flake.nix
View file

@ -1,8 +1,8 @@
{ {
description = "The purely functional package manager"; description = "The purely functional package manager";
inputs.nixpkgs.url = "nixpkgs/nixos-20.09-small"; inputs.nixpkgs.url = "nixpkgs/nixos-21.05-small";
inputs.lowdown-src = { url = "github:kristapsdz/lowdown/VERSION_0_8_4"; flake = false; }; inputs.lowdown-src = { url = "github:kristapsdz/lowdown"; flake = false; };
outputs = { self, nixpkgs, lowdown-src }: outputs = { self, nixpkgs, lowdown-src }:
@ -18,7 +18,9 @@
linux64BitSystems = [ "x86_64-linux" "aarch64-linux" ]; linux64BitSystems = [ "x86_64-linux" "aarch64-linux" ];
linuxSystems = linux64BitSystems ++ [ "i686-linux" ]; linuxSystems = linux64BitSystems ++ [ "i686-linux" ];
systems = linuxSystems ++ [ "x86_64-darwin" ]; systems = linuxSystems ++ [ "x86_64-darwin" "aarch64-darwin" ];
crossSystems = [ "armv6l-linux" "armv7l-linux" ];
forAllSystems = f: nixpkgs.lib.genAttrs systems (system: f system); forAllSystems = f: nixpkgs.lib.genAttrs systems (system: f system);
@ -59,6 +61,7 @@
configureFlags = configureFlags =
lib.optionals stdenv.isLinux [ lib.optionals stdenv.isLinux [
"--with-boost=${boost}/lib"
"--with-sandbox-shell=${sh}/bin/busybox" "--with-sandbox-shell=${sh}/bin/busybox"
"LDFLAGS=-fuse-ld=gold" "LDFLAGS=-fuse-ld=gold"
]; ];
@ -68,7 +71,7 @@
[ [
buildPackages.bison buildPackages.bison
buildPackages.flex buildPackages.flex
(lib.getBin buildPackages.lowdown) (lib.getBin buildPackages.lowdown-nix)
buildPackages.mdbook buildPackages.mdbook
buildPackages.autoconf-archive buildPackages.autoconf-archive
buildPackages.autoreconfHook buildPackages.autoreconfHook
@ -76,10 +79,10 @@
# Tests # Tests
buildPackages.git buildPackages.git
buildPackages.mercurial buildPackages.mercurial # FIXME: remove? only needed for tests
buildPackages.jq buildPackages.jq
] ]
++ lib.optionals stdenv.isLinux [(pkgs.util-linuxMinimal or pkgs.utillinuxMinimal)]; ++ lib.optionals stdenv.hostPlatform.isLinux [(buildPackages.util-linuxMinimal or buildPackages.utillinuxMinimal)];
buildDeps = buildDeps =
[ curl [ curl
@ -87,13 +90,12 @@
openssl sqlite openssl sqlite
libarchive libarchive
boost boost
nlohmann_json lowdown-nix
lowdown gtest
gmock
] ]
++ lib.optionals stdenv.isLinux [libseccomp] ++ lib.optionals stdenv.isLinux [libseccomp]
++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
++ lib.optional stdenv.isx86_64 libcpuid; ++ lib.optional stdenv.hostPlatform.isx86_64 libcpuid;
awsDeps = lib.optional (stdenv.isLinux || stdenv.isDarwin) awsDeps = lib.optional (stdenv.isLinux || stdenv.isDarwin)
(aws-sdk-cpp.override { (aws-sdk-cpp.override {
@ -102,7 +104,13 @@
}); });
propagatedDeps = propagatedDeps =
[ (boehmgc.override { enableLargeConfig = true; }) [ ((boehmgc.override {
enableLargeConfig = true;
}).overrideAttrs(o: {
patches = (o.patches or []) ++ [
./boehmgc-coroutine-sp-fallback.diff
];
}))
]; ];
perlDeps = perlDeps =
@ -119,8 +127,7 @@
'' ''
mkdir -p $out/nix-support mkdir -p $out/nix-support
# Converts /nix/store/50p3qk8kka9dl6wyq40vydq945k0j3kv-nix-2.4pre20201102_550e11f/bin/nix # Converts /nix/store/50p3qk8k...-nix-2.4pre20201102_550e11f/bin/nix to 50p3qk8k.../bin/nix.
# To 50p3qk8kka9dl6wyq40vydq945k0j3kv/bin/nix
tarballPath() { tarballPath() {
# Remove the store prefix # Remove the store prefix
local path=''${1#${builtins.storeDir}/} local path=''${1#${builtins.storeDir}/}
@ -133,10 +140,11 @@
substitute ${./scripts/install.in} $out/install \ substitute ${./scripts/install.in} $out/install \
${pkgs.lib.concatMapStrings ${pkgs.lib.concatMapStrings
(system: (system: let
'' \ tarball = if builtins.elem system crossSystems then self.hydraJobs.binaryTarballCross.x86_64-linux.${system} else self.hydraJobs.binaryTarball.${system};
--replace '@tarballHash_${system}@' $(nix --experimental-features nix-command hash-file --base16 --type sha256 ${self.hydraJobs.binaryTarball.${system}}/*.tar.xz) \ in '' \
--replace '@tarballPath_${system}@' $(tarballPath ${self.hydraJobs.binaryTarball.${system}}/*.tar.xz) \ --replace '@tarballHash_${system}@' $(nix --experimental-features nix-command hash-file --base16 --type sha256 ${tarball}/*.tar.xz) \
--replace '@tarballPath_${system}@' $(tarballPath ${tarball}/*.tar.xz) \
'' ''
) )
systems systems
@ -145,13 +153,15 @@
echo "file installer $out/install" >> $out/nix-support/hydra-build-products echo "file installer $out/install" >> $out/nix-support/hydra-build-products
''; '';
testNixVersions = pkgs: client: daemon: with commonDeps pkgs; pkgs.stdenv.mkDerivation { testNixVersions = pkgs: client: daemon: with commonDeps pkgs; with pkgs.lib; pkgs.stdenv.mkDerivation {
NIX_DAEMON_PACKAGE = daemon; NIX_DAEMON_PACKAGE = daemon;
NIX_CLIENT_PACKAGE = client; NIX_CLIENT_PACKAGE = client;
# Must keep this name short as OSX has a rather strict limit on the name =
# socket path length, and this name appears in the path of the "nix-tests"
# nix-daemon socket used in the tests + optionalString
name = "nix-tests"; (versionAtLeast daemon.version "2.4pre20211005" &&
versionAtLeast client.version "2.4pre20211005")
"-${client.version}-against-${daemon.version}";
inherit version; inherit version;
src = self; src = self;
@ -170,21 +180,92 @@
installPhase = '' installPhase = ''
mkdir -p $out mkdir -p $out
''; '';
installCheckPhase = "make installcheck";
installCheckPhase = "make installcheck -j$NIX_BUILD_CORES -l$NIX_BUILD_CORES";
}; };
binaryTarball = buildPackages: nix: pkgs: let
inherit (pkgs) cacert;
installerClosureInfo = buildPackages.closureInfo { rootPaths = [ nix cacert ]; };
in
buildPackages.runCommand "nix-binary-tarball-${version}"
{ #nativeBuildInputs = lib.optional (system != "aarch64-linux") shellcheck;
meta.description = "Distribution-independent Nix bootstrap binaries for ${pkgs.system}";
}
''
cp ${installerClosureInfo}/registration $TMPDIR/reginfo
cp ${./scripts/create-darwin-volume.sh} $TMPDIR/create-darwin-volume.sh
substitute ${./scripts/install-nix-from-closure.sh} $TMPDIR/install \
--subst-var-by nix ${nix} \
--subst-var-by cacert ${cacert}
substitute ${./scripts/install-darwin-multi-user.sh} $TMPDIR/install-darwin-multi-user.sh \
--subst-var-by nix ${nix} \
--subst-var-by cacert ${cacert}
substitute ${./scripts/install-systemd-multi-user.sh} $TMPDIR/install-systemd-multi-user.sh \
--subst-var-by nix ${nix} \
--subst-var-by cacert ${cacert}
substitute ${./scripts/install-multi-user.sh} $TMPDIR/install-multi-user \
--subst-var-by nix ${nix} \
--subst-var-by cacert ${cacert}
if type -p shellcheck; then
# SC1090: Don't worry about not being able to find
# $nix/etc/profile.d/nix.sh
shellcheck --exclude SC1090 $TMPDIR/install
shellcheck $TMPDIR/create-darwin-volume.sh
shellcheck $TMPDIR/install-darwin-multi-user.sh
shellcheck $TMPDIR/install-systemd-multi-user.sh
# SC1091: Don't panic about not being able to source
# /etc/profile
# SC2002: Ignore "useless cat" "error", when loading
# .reginfo, as the cat is a much cleaner
# implementation, even though it is "useless"
# SC2116: Allow ROOT_HOME=$(echo ~root) for resolving
# root's home directory
shellcheck --external-sources \
--exclude SC1091,SC2002,SC2116 $TMPDIR/install-multi-user
fi
chmod +x $TMPDIR/install
chmod +x $TMPDIR/create-darwin-volume.sh
chmod +x $TMPDIR/install-darwin-multi-user.sh
chmod +x $TMPDIR/install-systemd-multi-user.sh
chmod +x $TMPDIR/install-multi-user
dir=nix-${version}-${pkgs.system}
fn=$out/$dir.tar.xz
mkdir -p $out/nix-support
echo "file binary-dist $fn" >> $out/nix-support/hydra-build-products
tar cvfJ $fn \
--owner=0 --group=0 --mode=u+rw,uga+r \
--absolute-names \
--hard-dereference \
--transform "s,$TMPDIR/install,$dir/install," \
--transform "s,$TMPDIR/create-darwin-volume.sh,$dir/create-darwin-volume.sh," \
--transform "s,$TMPDIR/reginfo,$dir/.reginfo," \
--transform "s,$NIX_STORE,$dir/store,S" \
$TMPDIR/install \
$TMPDIR/create-darwin-volume.sh \
$TMPDIR/install-darwin-multi-user.sh \
$TMPDIR/install-systemd-multi-user.sh \
$TMPDIR/install-multi-user \
$TMPDIR/reginfo \
$(cat ${installerClosureInfo}/store-paths)
'';
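Given the --transform rules above, the unpacked tarball ends up with roughly this layout (version and store hash are illustrative):

    tar tJf nix-2.5pre-x86_64-linux.tar.xz
    # nix-2.5pre-x86_64-linux/install
    # nix-2.5pre-x86_64-linux/create-darwin-volume.sh
    # nix-2.5pre-x86_64-linux/install-multi-user
    # nix-2.5pre-x86_64-linux/.reginfo
    # nix-2.5pre-x86_64-linux/store/<hash>-nix-2.5pre/...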
in { in {
# A Nixpkgs overlay that overrides the 'nix' and # A Nixpkgs overlay that overrides the 'nix' and
# 'nix.perl-bindings' packages. # 'nix.perl-bindings' packages.
overlay = final: prev: { overlay = final: prev: {
# An older version of Nix to test against when using the daemon.
# Currently using `nixUnstable` as the stable one doesn't respect
# `NIX_DAEMON_SOCKET_PATH` which is needed for the tests.
nixStable = prev.nix; nixStable = prev.nix;
# Forward from the previous stage, as we don't want it to pick up the lowdown override
nixUnstable = prev.nixUnstable;
nix = with final; with commonDeps pkgs; stdenv.mkDerivation { nix = with final; with commonDeps pkgs; stdenv.mkDerivation {
name = "nix-${version}"; name = "nix-${version}";
inherit version; inherit version;
@ -254,9 +335,9 @@
xz xz
pkgs.perl pkgs.perl
boost boost
nlohmann_json
] ]
++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium; ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
++ lib.optional stdenv.isDarwin darwin.apple_sdk.frameworks.Security;
configureFlags = '' configureFlags = ''
--with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix} --with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix}
@ -270,24 +351,17 @@
}; };
lowdown = with final; stdenv.mkDerivation rec { lowdown-nix = with final; stdenv.mkDerivation rec {
name = "lowdown-0.8.4"; name = "lowdown-0.9.0";
/*
src = fetchurl {
url = "https://kristaps.bsd.lv/lowdown/snapshots/${name}.tar.gz";
hash = "sha512-U9WeGoInT9vrawwa57t6u9dEdRge4/P+0wLxmQyOL9nhzOEUU2FRz2Be9H0dCjYE7p2v3vCXIYk40M+jjULATw==";
};
*/
src = lowdown-src; src = lowdown-src;
outputs = [ "out" "bin" "dev" ]; outputs = [ "out" "bin" "dev" ];
nativeBuildInputs = [ which ]; nativeBuildInputs = [ buildPackages.which ];
configurePhase = configurePhase = ''
'' ${if (stdenv.isDarwin && stdenv.isAarch64) then "echo \"HAVE_SANDBOX_INIT=false\" > configure.local" else ""}
./configure \ ./configure \
PREFIX=${placeholder "dev"} \ PREFIX=${placeholder "dev"} \
BINDIR=${placeholder "bin"}/bin BINDIR=${placeholder "bin"}/bin
@ -303,92 +377,48 @@
buildStatic = nixpkgs.lib.genAttrs linux64BitSystems (system: self.packages.${system}.nix-static); buildStatic = nixpkgs.lib.genAttrs linux64BitSystems (system: self.packages.${system}.nix-static);
buildCross = nixpkgs.lib.genAttrs crossSystems (crossSystem:
nixpkgs.lib.genAttrs ["x86_64-linux"] (system: self.packages.${system}."nix-${crossSystem}"));
# Perl bindings for various platforms. # Perl bindings for various platforms.
perlBindings = nixpkgs.lib.genAttrs systems (system: self.packages.${system}.nix.perl-bindings); perlBindings = nixpkgs.lib.genAttrs systems (system: self.packages.${system}.nix.perl-bindings);
# Binary tarball for various platforms, containing a Nix store # Binary tarball for various platforms, containing a Nix store
# with the closure of 'nix' package, and the second half of # with the closure of 'nix' package, and the second half of
# the installation script. # the installation script.
binaryTarball = nixpkgs.lib.genAttrs systems (system: binaryTarball = nixpkgs.lib.genAttrs systems (system: binaryTarball nixpkgsFor.${system} nixpkgsFor.${system}.nix nixpkgsFor.${system});
with nixpkgsFor.${system}; binaryTarballCross = nixpkgs.lib.genAttrs ["x86_64-linux"] (system: builtins.listToAttrs (map (crossSystem: {
name = crossSystem;
let value = let
installerClosureInfo = closureInfo { rootPaths = [ nix cacert ]; }; nixpkgsCross = import nixpkgs {
in inherit system crossSystem;
overlays = [ self.overlay ];
runCommand "nix-binary-tarball-${version}" };
{ #nativeBuildInputs = lib.optional (system != "aarch64-linux") shellcheck; in binaryTarball nixpkgsFor.${system} self.packages.${system}."nix-${crossSystem}" nixpkgsCross;
meta.description = "Distribution-independent Nix bootstrap binaries for ${system}"; }) crossSystems));
}
''
cp ${installerClosureInfo}/registration $TMPDIR/reginfo
cp ${./scripts/create-darwin-volume.sh} $TMPDIR/create-darwin-volume.sh
substitute ${./scripts/install-nix-from-closure.sh} $TMPDIR/install \
--subst-var-by nix ${nix} \
--subst-var-by cacert ${cacert}
substitute ${./scripts/install-darwin-multi-user.sh} $TMPDIR/install-darwin-multi-user.sh \
--subst-var-by nix ${nix} \
--subst-var-by cacert ${cacert}
substitute ${./scripts/install-systemd-multi-user.sh} $TMPDIR/install-systemd-multi-user.sh \
--subst-var-by nix ${nix} \
--subst-var-by cacert ${cacert}
substitute ${./scripts/install-multi-user.sh} $TMPDIR/install-multi-user \
--subst-var-by nix ${nix} \
--subst-var-by cacert ${cacert}
if type -p shellcheck; then
# SC1090: Don't worry about not being able to find
# $nix/etc/profile.d/nix.sh
shellcheck --exclude SC1090 $TMPDIR/install
shellcheck $TMPDIR/create-darwin-volume.sh
shellcheck $TMPDIR/install-darwin-multi-user.sh
shellcheck $TMPDIR/install-systemd-multi-user.sh
# SC1091: Don't panic about not being able to source
# /etc/profile
# SC2002: Ignore "useless cat" "error", when loading
# .reginfo, as the cat is a much cleaner
# implementation, even though it is "useless"
# SC2116: Allow ROOT_HOME=$(echo ~root) for resolving
# root's home directory
shellcheck --external-sources \
--exclude SC1091,SC2002,SC2116 $TMPDIR/install-multi-user
fi
chmod +x $TMPDIR/install
chmod +x $TMPDIR/create-darwin-volume.sh
chmod +x $TMPDIR/install-darwin-multi-user.sh
chmod +x $TMPDIR/install-systemd-multi-user.sh
chmod +x $TMPDIR/install-multi-user
dir=nix-${version}-${system}
fn=$out/$dir.tar.xz
mkdir -p $out/nix-support
echo "file binary-dist $fn" >> $out/nix-support/hydra-build-products
tar cvfJ $fn \
--owner=0 --group=0 --mode=u+rw,uga+r \
--absolute-names \
--hard-dereference \
--transform "s,$TMPDIR/install,$dir/install," \
--transform "s,$TMPDIR/create-darwin-volume.sh,$dir/create-darwin-volume.sh," \
--transform "s,$TMPDIR/reginfo,$dir/.reginfo," \
--transform "s,$NIX_STORE,$dir/store,S" \
$TMPDIR/install \
$TMPDIR/create-darwin-volume.sh \
$TMPDIR/install-darwin-multi-user.sh \
$TMPDIR/install-systemd-multi-user.sh \
$TMPDIR/install-multi-user \
$TMPDIR/reginfo \
$(cat ${installerClosureInfo}/store-paths)
'');
# The first half of the installation script. This is uploaded # The first half of the installation script. This is uploaded
# to https://nixos.org/nix/install. It downloads the binary # to https://nixos.org/nix/install. It downloads the binary
# tarball for the user's system and calls the second half of the # tarball for the user's system and calls the second half of the
# installation script. # installation script.
installerScript = installScriptFor [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ]; installerScript = installScriptFor [ "x86_64-linux" "i686-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin" "armv6l-linux" "armv7l-linux" ];
installerScriptForGHA = installScriptFor [ "x86_64-linux" "x86_64-darwin" ]; installerScriptForGHA = installScriptFor [ "x86_64-linux" "x86_64-darwin" "armv6l-linux" "armv7l-linux"];
# docker image with Nix inside
dockerImage = nixpkgs.lib.genAttrs linux64BitSystems (system:
let
pkgs = nixpkgsFor.${system};
image = import ./docker.nix { inherit pkgs; tag = version; };
in pkgs.runCommand "docker-image-tarball-${version}"
{ meta.description = "Docker image with Nix for ${system}";
}
''
mkdir -p $out/nix-support
image=$out/image.tar.gz
ln -s ${image} $image
echo "file binary-dist $image" >> $out/nix-support/hydra-build-products
'');
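The job just wraps the image produced by docker.nix, so the artifact can be loaded into a local Docker daemon in the usual way (flakes enabled; the image name and tag come from docker.nix and the Nix version):

    nix build .#hydraJobs.dockerImage.x86_64-linux
    docker load < ./result/image.tar.gz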
# Line coverage analysis. # Line coverage analysis.
coverage = coverage =
@ -430,6 +460,12 @@
inherit (self) overlay; inherit (self) overlay;
}; };
tests.nssPreload = (import ./tests/nss-preload.nix rec {
system = "x86_64-linux";
inherit nixpkgs;
inherit (self) overlay;
});
tests.githubFlakes = (import ./tests/github-flakes.nix rec { tests.githubFlakes = (import ./tests/github-flakes.nix rec {
system = "x86_64-linux"; system = "x86_64-linux";
inherit nixpkgs; inherit nixpkgs;
@ -468,25 +504,33 @@
''; '';
*/ */
installTests = forAllSystems (system:
let pkgs = nixpkgsFor.${system}; in
pkgs.runCommand "install-tests" {
againstSelf = testNixVersions pkgs pkgs.nix pkgs.pkgs.nix;
againstCurrentUnstable =
# FIXME: temporarily disable this on macOS because of #3605.
if system == "x86_64-linux"
then testNixVersions pkgs pkgs.nix pkgs.nixUnstable
else null;
# Disabled because the latest stable version doesn't handle
# `NIX_DAEMON_SOCKET_PATH` which is required for the tests to work
# againstLatestStable = testNixVersions pkgs pkgs.nix pkgs.nixStable;
} "touch $out");
}; };
checks = forAllSystems (system: { checks = forAllSystems (system: {
binaryTarball = self.hydraJobs.binaryTarball.${system}; binaryTarball = self.hydraJobs.binaryTarball.${system};
perlBindings = self.hydraJobs.perlBindings.${system}; perlBindings = self.hydraJobs.perlBindings.${system};
installTests = installTests = self.hydraJobs.installTests.${system};
let pkgs = nixpkgsFor.${system}; in } // (if system == "x86_64-linux" then {
pkgs.runCommand "install-tests" { dockerImage = self.hydraJobs.dockerImage.${system};
againstSelf = testNixVersions pkgs pkgs.nix pkgs.pkgs.nix; } else {}));
againstCurrentUnstable = testNixVersions pkgs pkgs.nix pkgs.nixUnstable;
# Disabled because the latest stable version doesn't handle
# `NIX_DAEMON_SOCKET_PATH` which is required for the tests to work
# againstLatestStable = testNixVersions pkgs pkgs.nix pkgs.nixStable;
} "touch $out";
});
packages = forAllSystems (system: { packages = forAllSystems (system: {
inherit (nixpkgsFor.${system}) nix; inherit (nixpkgsFor.${system}) nix;
} // nixpkgs.lib.optionalAttrs (builtins.elem system linux64BitSystems) { } // (nixpkgs.lib.optionalAttrs (builtins.elem system linux64BitSystems) {
nix-static = let nix-static = let
nixpkgs = nixpkgsFor.${system}.pkgsStatic; nixpkgs = nixpkgsFor.${system}.pkgsStatic;
in with commonDeps nixpkgs; nixpkgs.stdenv.mkDerivation { in with commonDeps nixpkgs; nixpkgs.stdenv.mkDerivation {
@ -524,8 +568,49 @@
stripAllList = ["bin"]; stripAllList = ["bin"];
strictDeps = true; strictDeps = true;
hardeningDisable = [ "pie" ];
}; };
}); } // builtins.listToAttrs (map (crossSystem: {
name = "nix-${crossSystem}";
value = let
nixpkgsCross = import nixpkgs {
inherit system crossSystem;
overlays = [ self.overlay ];
};
in with commonDeps nixpkgsCross; nixpkgsCross.stdenv.mkDerivation {
name = "nix-${version}";
src = self;
VERSION_SUFFIX = versionSuffix;
outputs = [ "out" "dev" "doc" ];
nativeBuildInputs = nativeBuildDeps;
buildInputs = buildDeps ++ propagatedDeps;
configureFlags = [ "--sysconfdir=/etc" "--disable-doc-gen" ];
enableParallelBuilding = true;
makeFlags = "profiledir=$(out)/etc/profile.d";
doCheck = true;
installFlags = "sysconfdir=$(out)/etc";
postInstall = ''
mkdir -p $doc/nix-support
echo "doc manual $doc/share/doc/nix/manual" >> $doc/nix-support/hydra-build-products
mkdir -p $out/nix-support
echo "file binary-dist $out/bin/nix" >> $out/nix-support/hydra-build-products
'';
doInstallCheck = true;
installCheckFlags = "sysconfdir=$(out)/etc";
};
}) crossSystems)));
defaultPackage = forAllSystems (system: self.packages.${system}.nix); defaultPackage = forAllSystems (system: self.packages.${system}.nix);
@ -19,6 +19,8 @@ my $nixpkgsDir = "/home/eelco/Dev/nixpkgs-pristine";
my $TMPDIR = $ENV{'TMPDIR'} // "/tmp"; my $TMPDIR = $ENV{'TMPDIR'} // "/tmp";
my $isLatest = ($ENV{'IS_LATEST'} // "") eq "1";
# FIXME: cut&paste from nixos-channel-scripts. # FIXME: cut&paste from nixos-channel-scripts.
sub fetch { sub fetch {
my ($url, $type) = @_; my ($url, $type) = @_;
@ -35,16 +37,18 @@ sub fetch {
my $evalUrl = "https://hydra.nixos.org/eval/$evalId"; my $evalUrl = "https://hydra.nixos.org/eval/$evalId";
my $evalInfo = decode_json(fetch($evalUrl, 'application/json')); my $evalInfo = decode_json(fetch($evalUrl, 'application/json'));
#print Dumper($evalInfo); #print Dumper($evalInfo);
my $flakeUrl = $evalInfo->{flake} or die;
my $flakeInfo = decode_json(`nix flake metadata --json "$flakeUrl"` or die);
my $nixRev = $flakeInfo->{revision} or die;
my $nixRev = $evalInfo->{jobsetevalinputs}->{nix}->{revision} or die; my $buildInfo = decode_json(fetch("$evalUrl/job/build.x86_64-linux", 'application/json'));
#print Dumper($buildInfo);
my $tarballInfo = decode_json(fetch("$evalUrl/job/tarball", 'application/json')); my $releaseName = $buildInfo->{nixname};
my $releaseName = $tarballInfo->{releasename};
$releaseName =~ /nix-(.*)$/ or die; $releaseName =~ /nix-(.*)$/ or die;
my $version = $1; my $version = $1;
print STDERR "Nix revision is $nixRev, version is $version\n"; print STDERR "Flake URL is $flakeUrl, Nix revision is $nixRev, version is $version\n";
my $releaseDir = "nix/$releaseName"; my $releaseDir = "nix/$releaseName";
@ -83,12 +87,12 @@ sub downloadFile {
if (!-e $tmpFile) { if (!-e $tmpFile) {
print STDERR "downloading $srcFile to $tmpFile...\n"; print STDERR "downloading $srcFile to $tmpFile...\n";
system("NIX_REMOTE=https://cache.nixos.org/ nix cat-store '$srcFile' > '$tmpFile'") == 0 system("NIX_REMOTE=https://cache.nixos.org/ nix store cat '$srcFile' > '$tmpFile'") == 0
or die "unable to fetch $srcFile\n"; or die "unable to fetch $srcFile\n";
} }
my $sha256_expected = $buildInfo->{buildproducts}->{$productNr}->{sha256hash} or die; my $sha256_expected = $buildInfo->{buildproducts}->{$productNr}->{sha256hash} or die;
my $sha256_actual = `nix hash-file --base16 --type sha256 '$tmpFile'`; my $sha256_actual = `nix hash file --base16 --type sha256 '$tmpFile'`;
chomp $sha256_actual; chomp $sha256_actual;
if ($sha256_expected ne $sha256_actual) { if ($sha256_expected ne $sha256_actual) {
print STDERR "file $tmpFile is corrupt, got $sha256_actual, expected $sha256_expected\n"; print STDERR "file $tmpFile is corrupt, got $sha256_actual, expected $sha256_expected\n";
@ -104,12 +108,13 @@ sub downloadFile {
return $sha256_expected; return $sha256_expected;
} }
downloadFile("tarball", "2"); # .tar.bz2
my $tarballHash = downloadFile("tarball", "3"); # .tar.xz
downloadFile("binaryTarball.i686-linux", "1"); downloadFile("binaryTarball.i686-linux", "1");
downloadFile("binaryTarball.x86_64-linux", "1"); downloadFile("binaryTarball.x86_64-linux", "1");
downloadFile("binaryTarball.aarch64-linux", "1"); downloadFile("binaryTarball.aarch64-linux", "1");
downloadFile("binaryTarball.x86_64-darwin", "1"); downloadFile("binaryTarball.x86_64-darwin", "1");
downloadFile("binaryTarball.aarch64-darwin", "1");
downloadFile("binaryTarballCross.x86_64-linux.armv6l-linux", "1");
downloadFile("binaryTarballCross.x86_64-linux.armv7l-linux", "1");
downloadFile("installerScript", "1"); downloadFile("installerScript", "1");
for my $fn (glob "$tmpDir/*") { for my $fn (glob "$tmpDir/*") {
@ -131,53 +136,38 @@ for my $fn (glob "$tmpDir/*") {
} }
} }
exit if $version =~ /pre/; # Update nix-fallback-paths.nix.
if ($isLatest) {
system("cd $nixpkgsDir && git pull") == 0 or die;
# Update Nixpkgs in a very hacky way. sub getStorePath {
system("cd $nixpkgsDir && git pull") == 0 or die; my ($jobName) = @_;
my $oldName = `nix-instantiate --eval $nixpkgsDir -A nix.name`; chomp $oldName; my $buildInfo = decode_json(fetch("$evalUrl/job/$jobName", 'application/json'));
my $oldHash = `nix-instantiate --eval $nixpkgsDir -A nix.src.outputHash`; chomp $oldHash; return $buildInfo->{buildoutputs}->{out}->{path} or die "cannot get store path for '$jobName'";
print STDERR "old stable version in Nixpkgs = $oldName / $oldHash\n";
my $fn = "$nixpkgsDir/pkgs/tools/package-management/nix/default.nix";
my $oldFile = read_file($fn);
$oldFile =~ s/$oldName/"$releaseName"/g;
$oldFile =~ s/$oldHash/"$tarballHash"/g;
write_file($fn, $oldFile);
$oldName =~ s/nix-//g;
$oldName =~ s/"//g;
sub getStorePath {
my ($jobName) = @_;
my $buildInfo = decode_json(fetch("$evalUrl/job/$jobName", 'application/json'));
for my $product (values %{$buildInfo->{buildproducts}}) {
next unless $product->{type} eq "nix-build";
next if $product->{path} =~ /[a-z]+$/;
return $product->{path};
} }
die;
write_file("$nixpkgsDir/nixos/modules/installer/tools/nix-fallback-paths.nix",
"{\n" .
" x86_64-linux = \"" . getStorePath("build.x86_64-linux") . "\";\n" .
" i686-linux = \"" . getStorePath("build.i686-linux") . "\";\n" .
" aarch64-linux = \"" . getStorePath("build.aarch64-linux") . "\";\n" .
" x86_64-darwin = \"" . getStorePath("build.x86_64-darwin") . "\";\n" .
" aarch64-darwin = \"" . getStorePath("build.aarch64-darwin") . "\";\n" .
"}\n");
system("cd $nixpkgsDir && git commit -a -m 'nix-fallback-paths.nix: Update to $version'") == 0 or die;
} }
write_file("$nixpkgsDir/nixos/modules/installer/tools/nix-fallback-paths.nix",
"{\n" .
" x86_64-linux = \"" . getStorePath("build.x86_64-linux") . "\";\n" .
" i686-linux = \"" . getStorePath("build.i686-linux") . "\";\n" .
" aarch64-linux = \"" . getStorePath("build.aarch64-linux") . "\";\n" .
" x86_64-darwin = \"" . getStorePath("build.x86_64-darwin") . "\";\n" .
"}\n");
system("cd $nixpkgsDir && git commit -a -m 'nix: $oldName -> $version'") == 0 or die;
# Update the "latest" symlink. # Update the "latest" symlink.
$channelsBucket->add_key( $channelsBucket->add_key(
"nix-latest/install", "", "nix-latest/install", "",
{ "x-amz-website-redirect-location" => "https://releases.nixos.org/$releaseDir/install" }) { "x-amz-website-redirect-location" => "https://releases.nixos.org/$releaseDir/install" })
or die $channelsBucket->err . ": " . $channelsBucket->errstr; or die $channelsBucket->err . ": " . $channelsBucket->errstr
if $isLatest;
# Tag the release in Git. # Tag the release in Git.
chdir("/home/eelco/Dev/nix-pristine") or die; chdir("/home/eelco/Dev/nix-pristine") or die;
system("git remote update origin") == 0 or die; system("git remote update origin") == 0 or die;
system("git tag --force --sign $version $nixRev -m 'Tagging release $version'") == 0 or die; system("git tag --force --sign $version $nixRev -m 'Tagging release $version'") == 0 or die;
system("git push --tags") == 0 or die; system("git push --tags") == 0 or die;
system("git push --force-with-lease origin $nixRev:refs/heads/latest-release") == 0 or die; system("git push --force-with-lease origin $nixRev:refs/heads/latest-release") == 0 or die if $isLatest;
misc/fish/completion.fish Normal file
@ -0,0 +1,37 @@
function _nix_complete
# Get the current command up to a cursor.
# - Behaves correctly even with pipes and when nested in commands like env.
# - TODO: Returns the command verbatim (does not interpolate variables).
# That might not be optimal for arguments like -f.
set -l nix_args (commandline --current-process --tokenize --cut-at-cursor)
# --cut-at-cursor with --tokenize removes the current token so we need to add it separately.
# https://github.com/fish-shell/fish-shell/issues/7375
# Can be an empty string.
set -l current_token (commandline --current-token --cut-at-cursor)
# Nix wants the index of the argv item to complete but the $nix_args variable
# also contains the program name (argv[0]) so we would need to subtract 1.
# But the variable also misses the current token so it cancels out.
set -l nix_arg_to_complete (count $nix_args)
env NIX_GET_COMPLETIONS=$nix_arg_to_complete $nix_args $current_token
end
function _nix_accepts_files
set -l response (_nix_complete)
# First line is either filenames or no-filenames.
test $response[1] = 'filenames'
end
function _nix
set -l response (_nix_complete)
# Skip the first line since it is handled by _nix_accepts_files.
# Tail lines each contain a command followed by a tab character and, optionally, a description.
# This is also the format fish expects.
string collect -- $response[2..-1]
end
# Disable file path completion if paths do not belong in the current context.
complete --command nix --condition 'not _nix_accepts_files' --no-files
complete --command nix --arguments '(_nix)'
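The protocol behind _nix_complete is the same one the other shells use: Nix is re-invoked with NIX_GET_COMPLETIONS set to the index of the word being completed, prints "filenames" or "no-filenames" on the first line, and then one completion per line with an optional tab-separated description. Completing "nix bui", for example, the function above ends up running roughly this (the completion output shown is illustrative):

    env NIX_GET_COMPLETIONS=1 nix bui
    # no-filenames
    # build<TAB>Build a derivation or fetch a store path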
misc/fish/local.mk Normal file
@ -0,0 +1 @@
$(eval $(call install-file-as, $(d)/completion.fish, $(datarootdir)/fish/vendor_completions.d/nix.fish, 0644))
@ -1,4 +1,4 @@
ifeq ($(OS), Darwin) ifdef HOST_DARWIN
$(eval $(call install-data-in, $(d)/org.nixos.nix-daemon.plist, $(prefix)/Library/LaunchDaemons)) $(eval $(call install-data-in, $(d)/org.nixos.nix-daemon.plist, $(prefix)/Library/LaunchDaemons))
@ -19,7 +19,7 @@
<array> <array>
<string>/bin/sh</string> <string>/bin/sh</string>
<string>-c</string> <string>-c</string>
<string>/bin/wait4path /nix/var/nix/profiles/default/bin/nix-daemon &amp;&amp; /nix/var/nix/profiles/default/bin/nix-daemon</string> <string>/bin/wait4path /nix/var/nix/profiles/default/bin/nix-daemon &amp;&amp; exec /nix/var/nix/profiles/default/bin/nix-daemon</string>
</array> </array>
<key>StandardErrorPath</key> <key>StandardErrorPath</key>
<string>/var/log/nix-daemon.log</string> <string>/var/log/nix-daemon.log</string>
@ -1,4 +1,4 @@
ifeq ($(OS), Linux) ifdef HOST_LINUX
$(foreach n, nix-daemon.socket nix-daemon.service, $(eval $(call install-file-in, $(d)/$(n), $(prefix)/lib/systemd/system, 0644))) $(foreach n, nix-daemon.socket nix-daemon.service, $(eval $(call install-file-in, $(d)/$(n), $(prefix)/lib/systemd/system, 0644)))
@ -1,4 +1,4 @@
ifeq ($(OS), Linux) ifdef HOST_LINUX
$(foreach n, nix-daemon.conf, $(eval $(call install-file-in, $(d)/$(n), $(sysconfdir)/init, 0644))) $(foreach n, nix-daemon.conf, $(eval $(call install-file-in, $(d)/$(n), $(sysconfdir)/init, 0644)))
@ -1,3 +1,5 @@
#compdef nix
function _nix() { function _nix() {
local ifs_bk="$IFS" local ifs_bk="$IFS"
local input=("${(Q)words[@]}") local input=("${(Q)words[@]}")
@ -18,4 +20,4 @@ function _nix() {
_describe 'nix' suggestions _describe 'nix' suggestions
} }
compdef _nix nix _nix "$@"
misc/zsh/local.mk Normal file
@ -0,0 +1 @@
$(eval $(call install-file-as, $(d)/completion.zsh, $(datarootdir)/zsh/site-functions/_nix, 0644))
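With the #compdef header and the trailing _nix "$@" call added above, the zsh script becomes a standard autoloadable completion, and the new local.mk installs it under share/zsh/site-functions. A user profile can pick it up with something along these lines (the profile path is illustrative):

    fpath+=("$HOME/.nix-profile/share/zsh/site-functions")
    autoload -U compinit && compinit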
@ -10,8 +10,25 @@ bin-scripts :=
noinst-scripts := noinst-scripts :=
man-pages := man-pages :=
install-tests := install-tests :=
OS = $(shell uname -s)
ifdef HOST_OS
HOST_KERNEL = $(firstword $(subst -, ,$(HOST_OS)))
ifeq ($(HOST_KERNEL), cygwin)
HOST_CYGWIN = 1
endif
ifeq ($(patsubst darwin%,,$(HOST_KERNEL)),)
HOST_DARWIN = 1
endif
ifeq ($(patsubst freebsd%,,$(HOST_KERNEL)),)
HOST_FREEBSD = 1
endif
ifeq ($(HOST_KERNEL), linux)
HOST_LINUX = 1
endif
ifeq ($(patsubst solaris%,,$(HOST_KERNEL)),)
HOST_SOLARIS = 1
endif
endif
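HOST_OS is autoconf's host_os value (note the HOST_OS = @host_os@ line added to the Makefile.config template elsewhere in this diff), so the kernel matching behaves roughly like this shell sketch (host_os values are examples):

    case "$host_os" in
      cygwin*)  echo HOST_CYGWIN=1 ;;
      darwin*)  echo HOST_DARWIN=1 ;;   # e.g. darwin20.3.0
      freebsd*) echo HOST_FREEBSD=1 ;;
      linux*)   echo HOST_LINUX=1 ;;    # e.g. linux-gnu
      solaris*) echo HOST_SOLARIS=1 ;;
    esac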
# Hack to define a literal space. # Hack to define a literal space.
space := space :=
@ -50,16 +67,16 @@ endif
BUILD_SHARED_LIBS ?= 1 BUILD_SHARED_LIBS ?= 1
ifeq ($(BUILD_SHARED_LIBS), 1) ifeq ($(BUILD_SHARED_LIBS), 1)
ifeq (CYGWIN,$(findstring CYGWIN,$(OS))) ifdef HOST_CYGWIN
GLOBAL_CFLAGS += -U__STRICT_ANSI__ -D_GNU_SOURCE GLOBAL_CFLAGS += -U__STRICT_ANSI__ -D_GNU_SOURCE
GLOBAL_CXXFLAGS += -U__STRICT_ANSI__ -D_GNU_SOURCE GLOBAL_CXXFLAGS += -U__STRICT_ANSI__ -D_GNU_SOURCE
else else
GLOBAL_CFLAGS += -fPIC GLOBAL_CFLAGS += -fPIC
GLOBAL_CXXFLAGS += -fPIC GLOBAL_CXXFLAGS += -fPIC
endif endif
ifneq ($(OS), Darwin) ifndef HOST_DARWIN
ifneq ($(OS), SunOS) ifndef HOST_SOLARIS
ifneq ($(OS), FreeBSD) ifndef HOST_FREEBSD
GLOBAL_LDFLAGS += -Wl,--no-copy-dt-needed-entries GLOBAL_LDFLAGS += -Wl,--no-copy-dt-needed-entries
endif endif
endif endif
@ -1,9 +1,9 @@
libs-list := libs-list :=
ifeq ($(OS), Darwin) ifdef HOST_DARWIN
SO_EXT = dylib SO_EXT = dylib
else else
ifeq (CYGWIN,$(findstring CYGWIN,$(OS))) ifdef HOST_CYGWIN
SO_EXT = dll SO_EXT = dll
else else
SO_EXT = so SO_EXT = so
@ -59,7 +59,7 @@ define build-library
$(1)_OBJS := $$(addprefix $(buildprefix), $$(addsuffix .o, $$(basename $$(_srcs)))) $(1)_OBJS := $$(addprefix $(buildprefix), $$(addsuffix .o, $$(basename $$(_srcs))))
_libs := $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_PATH)) _libs := $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_PATH))
ifeq (CYGWIN,$(findstring CYGWIN,$(OS))) ifdef HOST_CYGWIN
$(1)_INSTALL_DIR ?= $$(bindir) $(1)_INSTALL_DIR ?= $$(bindir)
else else
$(1)_INSTALL_DIR ?= $$(libdir) $(1)_INSTALL_DIR ?= $$(libdir)
@ -73,18 +73,18 @@ define build-library
ifeq ($(BUILD_SHARED_LIBS), 1) ifeq ($(BUILD_SHARED_LIBS), 1)
ifdef $(1)_ALLOW_UNDEFINED ifdef $(1)_ALLOW_UNDEFINED
ifeq ($(OS), Darwin) ifdef HOST_DARWIN
$(1)_LDFLAGS += -undefined suppress -flat_namespace $(1)_LDFLAGS += -undefined suppress -flat_namespace
endif endif
else else
ifneq ($(OS), Darwin) ifndef HOST_DARWIN
ifneq (CYGWIN,$(findstring CYGWIN,$(OS))) ifndef HOST_CYGWIN
$(1)_LDFLAGS += -Wl,-z,defs $(1)_LDFLAGS += -Wl,-z,defs
endif endif
endif endif
endif endif
ifneq ($(OS), Darwin) ifndef HOST_DARWIN
$(1)_LDFLAGS += -Wl,-soname=$$($(1)_NAME).$(SO_EXT) $(1)_LDFLAGS += -Wl,-soname=$$($(1)_NAME).$(SO_EXT)
endif endif
@ -93,7 +93,7 @@ define build-library
$$($(1)_PATH): $$($(1)_OBJS) $$(_libs) | $$(_d)/ $$($(1)_PATH): $$($(1)_OBJS) $$(_libs) | $$(_d)/
$$(trace-ld) $(CXX) -o $$(abspath $$@) -shared $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$($(1)_LDFLAGS_PROPAGATED) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE)) $$($(1)_LDFLAGS_UNINSTALLED) $$(trace-ld) $(CXX) -o $$(abspath $$@) -shared $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$($(1)_LDFLAGS_PROPAGATED) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE)) $$($(1)_LDFLAGS_UNINSTALLED)
ifneq ($(OS), Darwin) ifndef HOST_DARWIN
$(1)_LDFLAGS_USE += -Wl,-rpath,$$(abspath $$(_d)) $(1)_LDFLAGS_USE += -Wl,-rpath,$$(abspath $$(_d))
endif endif
$(1)_LDFLAGS_USE += -L$$(_d) -l$$(patsubst lib%,%,$$(strip $$($(1)_NAME))) $(1)_LDFLAGS_USE += -L$$(_d) -l$$(patsubst lib%,%,$$(strip $$($(1)_NAME)))
@ -108,7 +108,7 @@ define build-library
$$(trace-ld) $(CXX) -o $$@ -shared $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$($(1)_LDFLAGS_PROPAGATED) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE_INSTALLED)) $$(trace-ld) $(CXX) -o $$@ -shared $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$($(1)_LDFLAGS_PROPAGATED) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE_INSTALLED))
$(1)_LDFLAGS_USE_INSTALLED += -L$$(DESTDIR)$$($(1)_INSTALL_DIR) -l$$(patsubst lib%,%,$$(strip $$($(1)_NAME))) $(1)_LDFLAGS_USE_INSTALLED += -L$$(DESTDIR)$$($(1)_INSTALL_DIR) -l$$(patsubst lib%,%,$$(strip $$($(1)_NAME)))
ifneq ($(OS), Darwin) ifndef HOST_DARWIN
ifeq ($(SET_RPATH_TO_LIBS), 1) ifeq ($(SET_RPATH_TO_LIBS), 1)
$(1)_LDFLAGS_USE_INSTALLED += -Wl,-rpath,$$($(1)_INSTALL_DIR) $(1)_LDFLAGS_USE_INSTALLED += -Wl,-rpath,$$($(1)_INSTALL_DIR)
else else
@ -125,8 +125,8 @@ define build-library
$(1)_PATH := $$(_d)/$$($(1)_NAME).a $(1)_PATH := $$(_d)/$$($(1)_NAME).a
$$($(1)_PATH): $$($(1)_OBJS) | $$(_d)/ $$($(1)_PATH): $$($(1)_OBJS) | $$(_d)/
$(trace-ld) $(LD) -Ur -o $$(_d)/$$($(1)_NAME).o $$? $$(trace-ld) $(LD) -Ur -o $$(_d)/$$($(1)_NAME).o $$?
$(trace-ar) $(AR) crs $$@ $$(_d)/$$($(1)_NAME).o $$(trace-ar) $(AR) crs $$@ $$(_d)/$$($(1)_NAME).o
$(1)_LDFLAGS_USE += $$($(1)_PATH) $$($(1)_LDFLAGS) $(1)_LDFLAGS_USE += $$($(1)_PATH) $$($(1)_LDFLAGS)
@ -13,3 +13,7 @@ define run-install-test
endef endef
.PHONY: check installcheck .PHONY: check installcheck
print-top-help += \
echo " check: Run unit tests"; \
echo " installcheck: Run functional tests";
@ -8,10 +8,15 @@ endif
libnixrust_PATH := $(d)/target/$(RUST_DIR)/libnixrust.$(SO_EXT) libnixrust_PATH := $(d)/target/$(RUST_DIR)/libnixrust.$(SO_EXT)
libnixrust_INSTALL_PATH := $(libdir)/libnixrust.$(SO_EXT) libnixrust_INSTALL_PATH := $(libdir)/libnixrust.$(SO_EXT)
libnixrust_LDFLAGS_USE := -L$(d)/target/$(RUST_DIR) -lnixrust -ldl libnixrust_LDFLAGS_USE := -L$(d)/target/$(RUST_DIR) -lnixrust
libnixrust_LDFLAGS_USE_INSTALLED := -L$(libdir) -lnixrust -ldl libnixrust_LDFLAGS_USE_INSTALLED := -L$(libdir) -lnixrust
ifeq ($(OS), Darwin) ifdef HOST_LINUX
libnixrust_LDFLAGS_USE += -ldl
libnixrust_LDFLAGS_USE_INSTALLED += -ldl
endif
ifdef HOST_DARWIN
libnixrust_BUILD_FLAGS = NIX_LDFLAGS="-undefined dynamic_lookup" libnixrust_BUILD_FLAGS = NIX_LDFLAGS="-undefined dynamic_lookup"
else else
libnixrust_LDFLAGS_USE += -Wl,-rpath,$(abspath $(d)/target/$(RUST_DIR)) libnixrust_LDFLAGS_USE += -Wl,-rpath,$(abspath $(d)/target/$(RUST_DIR))
@ -26,7 +31,7 @@ $(libnixrust_PATH): $(call rwildcard, $(d)/src, *.rs) $(d)/Cargo.toml
$(libnixrust_INSTALL_PATH): $(libnixrust_PATH) $(libnixrust_INSTALL_PATH): $(libnixrust_PATH)
$(target-gen) cp $^ $@ $(target-gen) cp $^ $@
ifeq ($(OS), Darwin) ifdef HOST_DARWIN
install_name_tool -id $@ $@ install_name_tool -id $@ $@
endif endif
@ -35,7 +40,7 @@ clean: clean-rust
clean-rust: clean-rust:
$(suppress) rm -rfv nix-rust/target $(suppress) rm -rfv nix-rust/target
ifneq ($(OS), Darwin) ifndef HOST_DARWIN
check: rust-tests check: rust-tests
rust-tests: rust-tests:
@ -1,6 +1,6 @@
makefiles = local.mk makefiles = local.mk
GLOBAL_CXXFLAGS += -g -Wall -std=c++17 GLOBAL_CXXFLAGS += -g -Wall -std=c++17 -I ../src
-include Makefile.config -include Makefile.config
@ -1,3 +1,4 @@
HOST_OS = @host_os@
CC = @CC@ CC = @CC@
CFLAGS = @CFLAGS@ CFLAGS = @CFLAGS@
CXX = @CXX@ CXX = @CXX@
@ -7,6 +7,8 @@ CXXFLAGS=
AC_PROG_CC AC_PROG_CC
AC_PROG_CXX AC_PROG_CXX
AC_CANONICAL_HOST
# Use 64-bit file system calls so that we can support files > 2 GiB. # Use 64-bit file system calls so that we can support files > 2 GiB.
AC_SYS_LARGEFILE AC_SYS_LARGEFILE
@ -1,6 +1,7 @@
package Nix::Config; package Nix::Config;
use MIME::Base64; use MIME::Base64;
use Nix::Store;
$version = "@PACKAGE_VERSION@"; $version = "@PACKAGE_VERSION@";
@ -22,6 +22,7 @@ our @EXPORT = qw(
derivationFromPath derivationFromPath
addTempRoot addTempRoot
getBinDir getStoreDir getBinDir getStoreDir
queryRawRealisation
); );
our $VERSION = '0.15'; our $VERSION = '0.15';
@ -15,6 +15,7 @@
#include "crypto.hh" #include "crypto.hh"
#include <sodium.h> #include <sodium.h>
#include <nlohmann/json.hpp>
using namespace nix; using namespace nix;
@ -120,6 +121,18 @@ SV * queryPathInfo(char * path, int base32)
croak("%s", e.what()); croak("%s", e.what());
} }
SV * queryRawRealisation(char * outputId)
PPCODE:
try {
auto realisation = store()->queryRealisation(DrvOutput::parse(outputId));
if (realisation)
XPUSHs(sv_2mortal(newSVpv(realisation->toJSON().dump().c_str(), 0)));
else
XPUSHs(sv_2mortal(newSVpv("", 0)));
} catch (Error & e) {
croak("%s", e.what());
}
SV * queryPathFromHashPart(char * hashPart) SV * queryPathFromHashPart(char * hashPart)
PPCODE: PPCODE:
@ -28,7 +28,7 @@ Store_CXXFLAGS = \
Store_LDFLAGS := $(SODIUM_LIBS) $(NIX_LIBS) Store_LDFLAGS := $(SODIUM_LIBS) $(NIX_LIBS)
ifeq (CYGWIN,$(findstring CYGWIN,$(OS))) ifdef HOST_CYGWIN
archlib = $(shell perl -E 'use Config; print $$Config{archlib};') archlib = $(shell perl -E 'use Config; print $$Config{archlib};')
libperl = $(shell perl -E 'use Config; print $$Config{libperl};') libperl = $(shell perl -E 'use Config; print $$Config{libperl};')
Store_LDFLAGS += $(shell find ${archlib} -name ${libperl}) Store_LDFLAGS += $(shell find ${archlib} -name ${libperl})
@ -1,33 +1,262 @@
#!/bin/sh #!/usr/bin/env bash
set -e set -eu
set -o pipefail
root_disk() { # I'm a little agnostic on the choices, but supporting a wide
diskutil info -plist / # slate of uses for now, including:
} # - import-only: `. create-darwin-volume.sh no-main[ ...]`
# - legacy: `./create-darwin-volume.sh` or `. create-darwin-volume.sh`
# (both will run main())
# - external alt-routine: `./create-darwin-volume.sh no-main func[ ...]`
if [ "${1-}" = "no-main" ]; then
shift
readonly _CREATE_VOLUME_NO_MAIN=1
else
readonly _CREATE_VOLUME_NO_MAIN=0
# declare some things we expect to inherit from install-multi-user
# I don't love this (because it's a bit of a kludge).
#
# CAUTION: (Dec 19 2020)
# This is a stopgap. It doesn't cover the full slate of
# identifiers we inherit--just those necessary to:
# - avoid breaking direct invocations of this script (here/now)
# - avoid hard-to-reverse structural changes before the call to rm
# single-user support is verified
#
# In the near-mid term, I (personally) think we should:
# - decide to deprecate the direct call and add a notice
# - fold all of this into install-darwin-multi-user.sh
# - intentionally remove the old direct-invocation form (kill the
# routine, replace this script w/ deprecation notice and a note
# on the remove-after date)
#
readonly NIX_ROOT="${NIX_ROOT:-/nix}"
# i.e., "disk1" _sudo() {
shift # throw away the 'explanation'
/usr/bin/sudo "$@"
}
failure() {
if [ "$*" = "" ]; then
cat
else
echo "$@"
fi
exit 1
}
task() {
echo "$@"
}
fi
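In the no-main mode the file is meant to be sourced purely for its function definitions, along these lines (the caller and variable name here are illustrative, not quoted from the installer):

    . "$EXTRACTED_NIX_PATH/create-darwin-volume.sh" "no-main"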
# usually "disk1"
root_disk_identifier() { root_disk_identifier() {
diskutil info -plist / | xmllint --xpath "/plist/dict/key[text()='ParentWholeDisk']/following-sibling::string[1]/text()" - # For performance (~10ms vs 280ms) I'm parsing 'diskX' from stat output
# (~diskXsY)--but I'm retaining the more-semantic approach since
# it documents intent better.
# /usr/sbin/diskutil info -plist / | xmllint --xpath "/plist/dict/key[text()='ParentWholeDisk']/following-sibling::string[1]/text()" -
#
local special_device
special_device="$(/usr/bin/stat -f "%Sd" /)"
echo "${special_device%s[0-9]*}"
} }
find_nix_volume() { # make it easy to play w/ 'Case-sensitive APFS'
diskutil apfs list -plist "$1" | xmllint --xpath "(/plist/dict/array/dict/key[text()='Volumes']/following-sibling::array/dict/key[text()='Name']/following-sibling::string[starts-with(translate(text(),'N','n'),'nix')]/text())[1]" - 2>/dev/null || true readonly NIX_VOLUME_FS="${NIX_VOLUME_FS:-APFS}"
readonly NIX_VOLUME_LABEL="${NIX_VOLUME_LABEL:-Nix Store}"
# Strongly assuming we'll make a volume on the device / is on
# But you can override NIX_VOLUME_USE_DISK to create it on some other device
readonly NIX_VOLUME_USE_DISK="${NIX_VOLUME_USE_DISK:-$(root_disk_identifier)}"
NIX_VOLUME_USE_SPECIAL="${NIX_VOLUME_USE_SPECIAL:-}"
NIX_VOLUME_USE_UUID="${NIX_VOLUME_USE_UUID:-}"
readonly NIX_VOLUME_MOUNTD_DEST="${NIX_VOLUME_MOUNTD_DEST:-/Library/LaunchDaemons/org.nixos.darwin-store.plist}"
if /usr/bin/fdesetup isactive >/dev/null; then
test_filevault_in_use() { return 0; }
# no readonly; we may modify if user refuses from cure_volume
NIX_VOLUME_DO_ENCRYPT="${NIX_VOLUME_DO_ENCRYPT:-1}"
else
test_filevault_in_use() { return 1; }
NIX_VOLUME_DO_ENCRYPT="${NIX_VOLUME_DO_ENCRYPT:-0}"
fi
should_encrypt_volume() {
test_filevault_in_use && (( NIX_VOLUME_DO_ENCRYPT == 1 ))
}
substep() {
printf " %s\n" "" "- $1" "" "${@:2}"
}
volumes_labeled() {
local label="$1"
xsltproc --novalid --stringparam label "$label" - <(/usr/sbin/ioreg -ra -c "AppleAPFSVolume") <<'EOF'
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="text"/>
<xsl:template match="/">
<xsl:apply-templates select="/plist/array/dict/key[text()='IORegistryEntryName']/following-sibling::*[1][text()=$label]/.."/>
</xsl:template>
<xsl:template match="dict">
<xsl:apply-templates match="string" select="key[text()='BSD Name']/following-sibling::*[1]"/>
<xsl:text>=</xsl:text>
<xsl:apply-templates match="string" select="key[text()='UUID']/following-sibling::*[1]"/>
<xsl:text>&#xA;</xsl:text>
</xsl:template>
</xsl:stylesheet>
EOF
# I cut label out of the extracted values, but here it is for reference:
# <xsl:apply-templates match="string" select="key[text()='IORegistryEntryName']/following-sibling::*[1]"/>
# <xsl:text>=</xsl:text>
}
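Each volume carrying the label comes out as one special=UUID pair per line, for example (values invented):

    volumes_labeled "Nix Store"
    # disk1s7=8F9AFFE3-96F6-4C26-9FA4-2B2C7F1C9B24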
right_disk() {
local volume_special="$1" # (i.e., disk1s7)
[[ "$volume_special" == "$NIX_VOLUME_USE_DISK"s* ]]
}
right_volume() {
local volume_special="$1" # (i.e., disk1s7)
# if set, it must match; otherwise ensure it's on the right disk
if [ -z "$NIX_VOLUME_USE_SPECIAL" ]; then
if right_disk "$volume_special"; then
NIX_VOLUME_USE_SPECIAL="$volume_special" # latch on
return 0
else
return 1
fi
else
[ "$volume_special" = "$NIX_VOLUME_USE_SPECIAL" ]
fi
}
right_uuid() {
local volume_uuid="$1"
# if set, it must match; otherwise allow
if [ -z "$NIX_VOLUME_USE_UUID" ]; then
NIX_VOLUME_USE_UUID="$volume_uuid" # latch on
return 0
else
[ "$volume_uuid" = "$NIX_VOLUME_USE_UUID" ]
fi
}
cure_volumes() {
local found volume special uuid
# loop just in case they have more than one volume
# (nothing stops you from doing this)
for volume in $(volumes_labeled "$NIX_VOLUME_LABEL"); do
# CAUTION: this could (maybe) be a more normal read
# loop like:
# while IFS== read -r special uuid; do
# # ...
# done <<<"$(volumes_labeled "$NIX_VOLUME_LABEL")"
#
# I did it with a for loop to skirt a problem with the obvious
# pattern replacing stdin and causing user prompts
# inside (which also use read and access stdin) to skip
#
# If there's an existing encrypted volume we can't find
# in keychain, the user never gets prompted to delete
# the volume, and the install fails.
#
# If you change this, a human needs to test a very
# specific scenario: you already have an encrypted
# Nix Store volume, and have deleted its credential
# from keychain. Ensure the script asks you if it can
# delete the volume, and then prompts for your sudo
# password to confirm.
#
# shellcheck disable=SC1097
IFS== read -r special uuid <<< "$volume"
# take the first one that's on the right disk
if [ -z "${found:-}" ]; then
if right_volume "$special" && right_uuid "$uuid"; then
cure_volume "$special" "$uuid"
found="${special} (${uuid})"
else
warning <<EOF
Ignoring ${special} (${uuid}) because I am looking for:
disk=${NIX_VOLUME_USE_DISK} special=${NIX_VOLUME_USE_SPECIAL:-${NIX_VOLUME_USE_DISK}sX} uuid=${NIX_VOLUME_USE_UUID:-any}
EOF
# TODO: give chance to delete if ! headless?
fi
else
warning <<EOF
Ignoring ${special} (${uuid}), already found target: $found
EOF
# TODO reminder? I feel like I want one
# idiom that reminds some warnings, or warns
# some reminders?
# TODO: if ! headless, chance to delete?
fi
done
if [ -z "${found:-}" ]; then
readonly NIX_VOLUME_USE_SPECIAL NIX_VOLUME_USE_UUID
fi
}
volume_encrypted() {
local volume_special="$1" # (i.e., disk1s7)
# Trying to match the first line of output; known first lines:
# No cryptographic users for <special>
# Cryptographic user for <special> (1 found)
# Cryptographic users for <special> (2 found)
/usr/sbin/diskutil apfs listCryptoUsers -plist "$volume_special" | /usr/bin/grep -q APFSCryptoUserUUID
} }
test_fstab() { test_fstab() {
grep -q "/nix apfs rw" /etc/fstab 2>/dev/null /usr/bin/grep -q "$NIX_ROOT apfs rw" /etc/fstab 2>/dev/null
} }
test_nix_symlink() { test_nix_root_is_symlink() {
[ -L "/nix" ] || grep -q "^nix." /etc/synthetic.conf 2>/dev/null [ -L "$NIX_ROOT" ]
} }
test_synthetic_conf() { test_synthetic_conf_either(){
grep -q "^nix$" /etc/synthetic.conf 2>/dev/null /usr/bin/grep -qE "^${NIX_ROOT:1}($|\t.{3,}$)" /etc/synthetic.conf 2>/dev/null
}
test_synthetic_conf_mountable() {
/usr/bin/grep -q "^${NIX_ROOT:1}$" /etc/synthetic.conf 2>/dev/null
}
test_synthetic_conf_symlinked() {
/usr/bin/grep -qE "^${NIX_ROOT:1}\t.{3,}$" /etc/synthetic.conf 2>/dev/null
}
test_nix_volume_mountd_installed() {
test -e "$NIX_VOLUME_MOUNTD_DEST"
}
# current volume password
test_keychain_by_uuid() {
local volume_uuid="$1"
# Note: doesn't need sudo just to check; doesn't output pw
security find-generic-password -s "$volume_uuid" &>/dev/null
}
get_volume_pass() {
local volume_uuid="$1"
_sudo \
"to confirm keychain has a password that unlocks this volume" \
security find-generic-password -s "$volume_uuid" -w
}
verify_volume_pass() {
local volume_special="$1" # (i.e., disk1s7)
local volume_uuid="$2"
/usr/sbin/diskutil apfs unlockVolume "$volume_special" -verify -stdinpassphrase -user "$volume_uuid"
}
volume_pass_works() {
local volume_special="$1" # (i.e., disk1s7)
local volume_uuid="$2"
get_volume_pass "$volume_uuid" | verify_volume_pass "$volume_special" "$volume_uuid"
} }
# Create the paths defined in synthetic.conf, saving us a reboot. # Create the paths defined in synthetic.conf, saving us a reboot.
create_synthetic_objects(){ create_synthetic_objects() {
# Big Sur takes away the -B flag we were using and replaces it # Big Sur takes away the -B flag we were using and replaces it
# with a -t flag that appears to do the same thing (but they # with a -t flag that appears to do the same thing (but they
# don't behave exactly the same way in terms of return values). # don't behave exactly the same way in terms of return values).
@ -41,129 +270,578 @@ create_synthetic_objects(){
} }
test_nix() { test_nix() {
test -d "/nix" test -d "$NIX_ROOT"
} }
test_t2_chip_present(){ test_voldaemon() {
# Use xartutil to see if system has a t2 chip. test -f "$NIX_VOLUME_MOUNTD_DEST"
#
# This isn't well-documented on its own; until it is,
# let's keep track of knowledge/assumptions.
#
# Warnings:
# - Don't search "xart" if porn will cause you trouble :)
# - Other xartutil flags do dangerous things. Don't run them
# naively. If you must, search "xartutil" first.
#
# Assumptions:
# - the "xART session seeds recovery utility"
# appears to interact with xartstorageremoted
# - `sudo xartutil --list` lists xART sessions
# and their seeds and exits 0 if successful. If
# not, it exits 1 and prints an error such as:
# xartutil: ERROR: No supported link to the SEP present
# - xART sessions/seeds are present when a T2 chip is
# (and not, otherwise)
# - the presence of a T2 chip means a newly-created
# volume on the primary drive will be
# encrypted at rest
# - all together: `sudo xartutil --list`
# should exit 0 if a new Nix Store volume will
# be encrypted at rest, and exit 1 if not.
sudo xartutil --list >/dev/null 2>/dev/null
} }
test_filevault_in_use() { generate_mount_command() {
fdesetup isactive >/dev/null local cmd_type="$1" # encrypted|unencrypted
local volume_uuid mountpoint cmd=()
printf -v volume_uuid "%q" "$2"
printf -v mountpoint "%q" "$NIX_ROOT"
case "$cmd_type" in
encrypted)
cmd=(/bin/sh -c "/usr/bin/security find-generic-password -s '$volume_uuid' -w | /usr/sbin/diskutil apfs unlockVolume '$volume_uuid' -mountpoint '$mountpoint' -stdinpassphrase");;
unencrypted)
cmd=(/usr/sbin/diskutil mount -mountPoint "$mountpoint" "$volume_uuid");;
*)
failure "Invalid first arg $cmd_type to generate_mount_command";;
esac
printf " <string>%s</string>\n" "${cmd[@]}"
} }
# use after error msg for conditions we don't understand generate_mount_daemon() {
suggest_report_error(){ local cmd_type="$1" # encrypted|unencrypted
# ex "error: something sad happened :(" >&2 local volume_uuid="$2"
echo " please report this @ https://github.com/nixos/nix/issues" >&2 cat <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>RunAtLoad</key>
<true/>
<key>Label</key>
<string>org.nixos.darwin-store</string>
<key>ProgramArguments</key>
<array>
$(generate_mount_command "$cmd_type" "$volume_uuid")
</array>
</dict>
</plist>
EOF
} }
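A sketch of how such a daemon definition would typically be written out and activated; the installer's actual plumbing may differ:

    generate_mount_daemon "encrypted" "$volume_uuid" \
        | sudo tee "$NIX_VOLUME_MOUNTD_DEST" >/dev/null
    sudo launchctl bootstrap system "$NIX_VOLUME_MOUNTD_DEST"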
main() { _eat_bootout_err() {
( /usr/bin/grep -v "Boot-out failed: 36: Operation now in progress"
echo "" }
echo " ------------------------------------------------------------------ "
echo " | This installer will create a volume for the nix store and |"
echo " | configure it to mount at /nix. Follow these steps to uninstall. |"
echo " ------------------------------------------------------------------ "
echo ""
echo " 1. Remove the entry from fstab using 'sudo vifs'"
echo " 2. Destroy the data volume using 'diskutil apfs deleteVolume'"
echo " 3. Remove the 'nix' line from /etc/synthetic.conf or the file"
echo ""
) >&2
if test_nix_symlink; then # TODO: remove with --uninstall?
echo "error: /nix is a symlink, please remove it and make sure it's not in synthetic.conf (in which case a reboot is required)" >&2 uninstall_launch_daemon_directions() {
echo " /nix -> $(readlink "/nix")" >&2 local daemon_label="$1" # i.e., org.nixos.blah-blah
exit 2 local daemon_plist="$2" # abspath
substep "Uninstall LaunchDaemon $daemon_label" \
" sudo launchctl bootout system/$daemon_label" \
" sudo rm $daemon_plist"
}
uninstall_launch_daemon_prompt() {
local daemon_label="$1" # i.e., org.nixos.blah-blah
local daemon_plist="$2" # abspath
local reason_for_daemon="$3"
cat <<EOF
The installer adds a LaunchDaemon to $reason_for_daemon: $daemon_label
EOF
if ui_confirm "Can I remove it?"; then
_sudo "to terminate the daemon" \
launchctl bootout "system/$daemon_label" 2> >(_eat_bootout_err >&2) || true
# this can "fail" with a message like:
# Boot-out failed: 36: Operation now in progress
_sudo "to remove the daemon definition" rm "$daemon_plist"
fi
}
nix_volume_mountd_uninstall_directions() {
uninstall_launch_daemon_directions "org.nixos.darwin-store" \
"$NIX_VOLUME_MOUNTD_DEST"
}
nix_volume_mountd_uninstall_prompt() {
uninstall_launch_daemon_prompt "org.nixos.darwin-store" \
"$NIX_VOLUME_MOUNTD_DEST" \
"mount your Nix volume"
}
# TODO: move nix_daemon to install-darwin-multi-user if/when uninstall_launch_daemon_prompt moves up to install-multi-user
nix_daemon_uninstall_prompt() {
uninstall_launch_daemon_prompt "org.nixos.nix-daemon" \
"$NIX_DAEMON_DEST" \
"run the nix-daemon"
}
# TODO: remove with --uninstall?
nix_daemon_uninstall_directions() {
uninstall_launch_daemon_directions "org.nixos.nix-daemon" \
"$NIX_DAEMON_DEST"
}
# TODO: remove with --uninstall?
synthetic_conf_uninstall_directions() {
# :1 to strip leading slash
substep "Remove ${NIX_ROOT:1} from /etc/synthetic.conf" \
" If nix is the only entry: sudo rm /etc/synthetic.conf" \
" Otherwise: sudo /usr/bin/sed -i '' -e '/^${NIX_ROOT:1}$/d' /etc/synthetic.conf"
}
synthetic_conf_uninstall_prompt() {
cat <<EOF
During install, I add '${NIX_ROOT:1}' to /etc/synthetic.conf, which instructs
macOS to create an empty root directory for mounting the Nix volume.
EOF
# make the edit to a copy
/usr/bin/grep -vE "^${NIX_ROOT:1}($|\t.{3,}$)" /etc/synthetic.conf > "$SCRATCH/synthetic.conf.edit"
if test_synthetic_conf_symlinked; then
warning <<EOF
/etc/synthetic.conf already contains a line instructing your system
to make '${NIX_ROOT}' a symlink:
$(/usr/bin/grep -nE "^${NIX_ROOT:1}\t.{3,}$" /etc/synthetic.conf)
This may mean your system has/had a non-standard Nix install.
The volume-creation process in this installer is *not* compatible
with a symlinked store, so I'll have to remove this instruction to
continue.
If you want/need to keep this instruction, answer 'n' to abort.
EOF
fi fi
if ! test_synthetic_conf; then # ask to rm if this left the file empty aside from comments, else edit
echo "Configuring /etc/synthetic.conf..." >&2 if /usr/bin/diff -q <(:) <(/usr/bin/grep -v "^#" "$SCRATCH/synthetic.conf.edit") &>/dev/null; then
echo nix | sudo tee -a /etc/synthetic.conf if confirm_rm "/etc/synthetic.conf"; then
if ! test_synthetic_conf; then if test_nix_root_is_symlink; then
echo "error: failed to configure synthetic.conf;" >&2 failure >&2 <<EOF
suggest_report_error I removed /etc/synthetic.conf, but $NIX_ROOT is already a symlink
exit 1 (-> $(readlink "$NIX_ROOT")). The system should remove it when you reboot.
Once you've rebooted, run the installer again.
EOF
fi
return 0
fi
else
if confirm_edit "$SCRATCH/synthetic.conf.edit" "/etc/synthetic.conf"; then
if test_nix_root_is_symlink; then
failure >&2 <<EOF
I edited Nix out of /etc/synthetic.conf, but $NIX_ROOT is already a symlink
(-> $(readlink "$NIX_ROOT")). The system should remove it when you reboot.
Once you've rebooted, run the installer again.
EOF
fi
return 0
fi fi
fi fi
# fallback instructions
echo "Manually remove nix from /etc/synthetic.conf"
return 1
}
if ! test_nix; then add_nix_vol_fstab_line() {
echo "Creating mountpoint for /nix..." >&2 local uuid="$1"
create_synthetic_objects # the ones we defined in synthetic.conf # shellcheck disable=SC1003,SC2026
if ! test_nix; then local escaped_mountpoint="${NIX_ROOT/ /'\\\'040}"
sudo mkdir -p /nix 2>/dev/null || true shift
EDITOR="/usr/bin/ex" _sudo "to add nix to fstab" "$@" <<EOF
:a
UUID=$uuid $escaped_mountpoint apfs rw,noauto,nobrowse,suid,owners
.
:x
EOF
# TODO: preserving my notes on suid,owners above until resolved
# There *may* be some issue regarding volume ownership, see nix#3156
#
# It seems like the cheapest fix is adding "suid,owners" to fstab, but:
# - We don't have much info on this condition yet
# - I'm not certain if these cause other problems?
# - There's a "chown" component some people claim to need to fix this
# that I don't understand yet
# (Note however that I've had to add a chown step to handle
# single->multi-user reinstalls, which may cover this)
#
# I'm not sure if it's safe to approach this way?
#
# I think the most proper way to test for it is:
# diskutil info -plist "$NIX_VOLUME_LABEL" | xmllint --xpath "(/plist/dict/key[text()='GlobalPermissionsEnabled'])/following-sibling::*[1][name()='true']" -; echo $?
#
# There's also `sudo /usr/sbin/vsdbutil -c /path` (which is much faster, but is also
# deprecated and needs minor parsing).
#
# If no one finds a problem with doing so, I think the simplest approach
# is to just eagerly set this. I found a few imperative approaches:
# (diskutil enableOwnership, ~100ms), a cheap one (/usr/sbin/vsdbutil -a, ~40-50ms),
# a very cheap one (append the internal format to /var/db/volinfo.database).
#
# But vsdbutil's deprecation notice suggests using fstab, so I want to
# give that a whirl first.
#
# TODO: when this is workable, poke infinisil about reproducing the issue
# and confirming this fix?
}
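With the default NIX_ROOT, the ex script above appends a single line to /etc/fstab of roughly this shape (UUID invented):

    UUID=8F9AFFE3-96F6-4C26-9FA4-2B2C7F1C9B24 /nix apfs rw,noauto,nobrowse,suid,owners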
delete_nix_vol_fstab_line() {
# TODO: I'm scaffolding this to handle the new nix volumes
# but it might be nice to generalize a smidge further to
# go ahead and set up a pattern for curing "old" things
# we no longer do?
EDITOR="/usr/bin/patch" _sudo "to cut nix from fstab" "$@" < <(/usr/bin/diff /etc/fstab <(/usr/bin/grep -v "$NIX_ROOT apfs rw" /etc/fstab))
# leaving some parts out of the grep; people may fiddle this a little?
}
# TODO: hope to remove with --uninstall
fstab_uninstall_directions() {
substep "Remove ${NIX_ROOT} from /etc/fstab" \
" If nix is the only entry: sudo rm /etc/fstab" \
" Otherwise, run 'sudo /usr/sbin/vifs' to remove the nix line"
}
fstab_uninstall_prompt() {
cat <<EOF
During install, I add '${NIX_ROOT}' to /etc/fstab so that macOS knows what
mount options to use for the Nix volume.
EOF
cp /etc/fstab "$SCRATCH/fstab.edit"
# technically doesn't need the _sudo path, but throwing away the
# output is probably better than mostly-duplicating the code...
delete_nix_vol_fstab_line patch "$SCRATCH/fstab.edit" &>/dev/null
# if the patch test edit, minus comment lines, is equal to empty (:)
if /usr/bin/diff -q <(:) <(/usr/bin/grep -v "^#" "$SCRATCH/fstab.edit") &>/dev/null; then
# this edit would leave it empty; propose deleting it
if confirm_rm "/etc/fstab"; then
return 0
else
echo "Remove nix from /etc/fstab (or remove the file)"
fi fi
if ! test_nix; then else
echo "error: failed to bootstrap /nix; if a reboot doesn't help," >&2 echo "I might be able to help you make this edit. Here's the diff:"
suggest_report_error if ! _diff "/etc/fstab" "$SCRATCH/fstab.edit" && ui_confirm "Does the change above look right?"; then
exit 1 delete_nix_vol_fstab_line /usr/sbin/vifs
else
echo "Remove nix from /etc/fstab (or remove the file)"
fi fi
fi fi
}
disk="$(root_disk_identifier)" remove_volume() {
volume=$(find_nix_volume "$disk") local volume_special="$1" # (i.e., disk1s7)
if [ -z "$volume" ]; then _sudo "to unmount the Nix volume" \
echo "Creating a Nix Store volume..." >&2 /usr/sbin/diskutil unmount force "$volume_special" || true # might not be mounted
_sudo "to delete the Nix volume" \
/usr/sbin/diskutil apfs deleteVolume "$volume_special"
}
if test_filevault_in_use; then # aspiration: robust enough to both fix problems
# TODO: Not sure if it's in-scope now, but `diskutil apfs list` # *and* update older darwin volumes
# shows both filevault and encrypted at rest status, and it cure_volume() {
# may be the more semantic way to test for this? It'll show local volume_special="$1" # (i.e., disk1s7)
# `FileVault: No (Encrypted at rest)` local volume_uuid="$2"
# `FileVault: No` header "Found existing Nix volume"
# `FileVault: Yes (Unlocked)` row " special" "$volume_special"
# and so on. row " uuid" "$volume_uuid"
if test_t2_chip_present; then
echo "warning: boot volume is FileVault-encrypted, but the Nix store volume" >&2 if volume_encrypted "$volume_special"; then
echo " is only encrypted at rest." >&2 row "encrypted" "yes"
echo " See https://nixos.org/nix/manual/#sect-macos-installation" >&2 if volume_pass_works "$volume_special" "$volume_uuid"; then
NIX_VOLUME_DO_ENCRYPT=0
ok "Found a working decryption password in keychain :)"
echo ""
else
# - this is a volume we made, and
# - the user encrypted it on their own
# - something deleted the credential
# - this is an old or BYO volume and the pw
# just isn't somewhere we can find it.
#
# We're going to explain why we're freaking out
# and prompt them to either delete the volume
# (requiring a sudo auth), or abort to fix
warning <<EOF
This volume is encrypted, but I don't see a password to decrypt it.
The quick fix is to let me delete this volume and make you a new one.
If that's okay, enter your (sudo) password to continue. If not, you
can ensure the decryption password is in your system keychain with a
"Where" (service) field set to this volume's UUID:
$volume_uuid
EOF
if password_confirm "delete this volume"; then
remove_volume "$volume_special"
else else
echo "error: refusing to create Nix store volume because the boot volume is" >&2 # TODO: this is a good design case for a warn-and
echo " FileVault encrypted, but encryption-at-rest is not available." >&2 # remind idiom...
echo " Manually create a volume for the store and re-run this script." >&2 failure <<EOF
echo " See https://nixos.org/nix/manual/#sect-macos-installation" >&2 Your Nix volume is encrypted, but I couldn't find its password. Either:
exit 1 - Delete or rename the volume out of the way
- Ensure its decryption password is in the system keychain with a
"Where" (service) field set to this volume's UUID:
$volume_uuid
EOF
fi
fi
elif test_filevault_in_use; then
row "encrypted" "no"
warning <<EOF
FileVault is on, but your $NIX_VOLUME_LABEL volume isn't encrypted.
EOF
# if we're interactive, give them a chance to
# encrypt the volume. If not, /shrug
if ! headless && (( NIX_VOLUME_DO_ENCRYPT == 1 )); then
if ui_confirm "Should I encrypt it and add the decryption key to your keychain?"; then
encrypt_volume "$volume_uuid" "$NIX_VOLUME_LABEL"
NIX_VOLUME_DO_ENCRYPT=0
else
NIX_VOLUME_DO_ENCRYPT=0
reminder "FileVault is on, but your $NIX_VOLUME_LABEL volume isn't encrypted."
fi fi
fi fi
sudo diskutil apfs addVolume "$disk" APFS 'Nix Store' -mountpoint /nix
volume="Nix Store"
else else
echo "Using existing '$volume' volume" >&2 row "encrypted" "no"
fi
if ! test_fstab; then
echo "Configuring /etc/fstab..." >&2
label=$(echo "$volume" | sed 's/ /\\040/g')
# shellcheck disable=SC2209
printf "\$a\nLABEL=%s /nix apfs rw,nobrowse\n.\nwq\n" "$label" | EDITOR=ed sudo vifs
fi fi
} }
main "$@" remove_volume_artifacts() {
if test_synthetic_conf_either; then
# NIX_ROOT is in synthetic.conf
if synthetic_conf_uninstall_prompt; then
# TODO: moot until we tackle uninstall, but when we're
# actually uninstalling, we should issue:
# reminder "macOS will clean up the empty mount-point directory at $NIX_ROOT on reboot."
:
fi
fi
if test_fstab; then
fstab_uninstall_prompt
fi
if test_nix_volume_mountd_installed; then
nix_volume_mountd_uninstall_prompt
fi
}
setup_synthetic_conf() {
if test_nix_root_is_symlink; then
if ! test_synthetic_conf_symlinked; then
failure >&2 <<EOF
error: $NIX_ROOT is a symlink (-> $(readlink "$NIX_ROOT")).
Please remove it. If nix is in /etc/synthetic.conf, remove it and reboot.
EOF
fi
fi
if ! test_synthetic_conf_mountable; then
task "Configuring /etc/synthetic.conf to make a mount-point at $NIX_ROOT" >&2
# technically /etc/synthetic.d/nix is supported in Big Sur+
# but handling both takes even more code...
_sudo "to add Nix to /etc/synthetic.conf" \
/usr/bin/ex /etc/synthetic.conf <<EOF
:a
${NIX_ROOT:1}
.
:x
EOF
if ! test_synthetic_conf_mountable; then
failure "error: failed to configure synthetic.conf" >&2
fi
create_synthetic_objects
if ! test_nix; then
failure >&2 <<EOF
error: failed to bootstrap $NIX_ROOT
If you enabled FileVault after booting, this is likely a known issue
with macOS that you'll have to reboot to fix. If you didn't enable FV,
though, please open an issue describing how the system that you see
this error on was set up.
EOF
fi
fi
}
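# --- Illustrative sketch (not part of the installer) ----------------------
# A rough approximation of the kind of check test_synthetic_conf_mountable
# (defined earlier in this file) performs around the ex edit above: look for
# a rootless $NIX_ROOT entry in /etc/synthetic.conf. The function name and
# exact pattern here are assumptions for illustration only.
synthetic_conf_has_nix_sketch() {
    # synthetic.conf entries omit the leading slash, hence ${NIX_ROOT:1}
    /usr/bin/grep -Eq "^${NIX_ROOT:1}([[:space:]]|\$)" /etc/synthetic.conf 2>/dev/null
}
# ---------------------------------------------------------------------------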
setup_fstab() {
local volume_uuid="$1"
# fstab used to be responsible for mounting the volume. Now the last
# step adds a LaunchDaemon responsible for mounting. This is technically
# redundant for mounting, but diskutil appears to pick up mount options
# from fstab (and diskutil's support for specifying them directly is not
# consistent across versions/subcommands).
if ! test_fstab; then
task "Configuring /etc/fstab to specify volume mount options" >&2
add_nix_vol_fstab_line "$volume_uuid" /usr/sbin/vifs
fi
}
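# --- Illustrative sketch (not part of the installer) ----------------------
# The vifs edit driven by add_nix_vol_fstab_line (defined elsewhere in this
# file) leaves a single fstab entry of roughly this shape; the exact mount
# options shown are an assumption, not a quote of the real helper:
#
#   UUID=<volume uuid> /nix apfs rw,noauto,nobrowse
#
# As the comment above says, fstab mainly exists to carry mount options;
# the LaunchDaemon installed later is what actually mounts the volume.
# ---------------------------------------------------------------------------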
encrypt_volume() {
local volume_uuid="$1"
local volume_label="$2"
local password
# Note: mount/unmount are late additions to support the right order
# of operations for creating the volume and then baking its uuid into
# other artifacts; not as well-trod wrt potential errors, race
# conditions, etc.
/usr/sbin/diskutil mount "$volume_label"
password="$(/usr/bin/xxd -l 32 -p -c 256 /dev/random)"
_sudo "to add your Nix volume's password to Keychain" \
/usr/bin/security -i <<EOF
add-generic-password -a "$volume_label" -s "$volume_uuid" -l "$volume_label encryption password" -D "Encrypted volume password" -j "Added automatically by the Nix installer for use by $NIX_VOLUME_MOUNTD_DEST" -w "$password" -T /System/Library/CoreServices/APFSUserAgent -T /System/Library/CoreServices/CSUserAgent -T /usr/bin/security "/Library/Keychains/System.keychain"
EOF
builtin printf "%s" "$password" | _sudo "to encrypt your Nix volume" \
/usr/sbin/diskutil apfs encryptVolume "$volume_label" -user disk -stdinpassphrase
/usr/sbin/diskutil unmount force "$volume_label"
}
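# --- Illustrative sketch (not part of the installer) ----------------------
# Roughly how the secret stored above can later be read back out of the
# System keychain and used to unlock the volume; doing this at boot is the
# job of the LaunchDaemon payload produced by generate_mount_daemon elsewhere
# in this file. Treat the function below as an assumed approximation of that
# flow, not the shipped command line.
unlock_nix_volume_sketch() {
    local volume_uuid="$1"
    /usr/bin/security find-generic-password -s "$volume_uuid" -w \
        | /usr/sbin/diskutil apfs unlockVolume "$volume_uuid" -stdinpassphrase
}
# ---------------------------------------------------------------------------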
create_volume() {
# Notes:
# 1) using `-nomount` instead of `-mountpoint "$NIX_ROOT"` to get
# its UUID and set mount opts in fstab before first mount
#
# 2) system is in some sense less secure than user keychain... (it's
# possible to read the password for decrypting the keychain) but
# the user keychain appears to be available too late. As far as I
# can tell, the file with this password (/var/db/SystemKey) is
# inside the FileVault envelope. If that isn't true, it may make
# sense to store the password inside the envelope?
#
# 3) At some point it would be ideal to have a small binary to serve
# as the daemon itself, and for it to replace /usr/bin/security here.
#
# 4) *UserAgent exemptions should let the system seamlessly supply the
# password if noauto is removed from fstab entry. This is intentional;
# the user will hopefully look for help if the volume stops mounting,
# rather than failing over into subtle race-condition problems.
#
# 5) If we ever get users griping about not having space to do
# anything useful with Nix, it is possible to specify
# `-reserve 10g` or something, which will fail w/o that much
#
# 6) getting special w/ awk may be fragile, but doing it to:
# - save time over running slow diskutil commands
# - skirt risk we grab wrong volume if multiple match
_sudo "to create a new APFS volume '$NIX_VOLUME_LABEL' on $NIX_VOLUME_USE_DISK" \
/usr/sbin/diskutil apfs addVolume "$NIX_VOLUME_USE_DISK" "$NIX_VOLUME_FS" "$NIX_VOLUME_LABEL" -nomount | /usr/bin/awk '/Created new APFS Volume/ {print $5}'
}
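# --- Illustrative note (not part of the installer) ------------------------
# The awk above keys off diskutil's success line, which looks something like
# "Created new APFS Volume disk1s7"; field 5 is the special device name. The
# sample line below is an assumption, not captured output, but it shows the
# extraction:
#   printf 'Created new APFS Volume disk1s7\n' \
#       | /usr/bin/awk '/Created new APFS Volume/ {print $5}'    # -> disk1s7
# ---------------------------------------------------------------------------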
volume_uuid_from_special() {
local volume_special="$1" # (i.e., disk1s7)
# For reasons I won't pretend to fathom, this returns 253 when it works
/System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -k "$volume_special" || true
}
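# --- Illustrative usage sketch (not part of the installer) ----------------
# apfs.util -k prints the UUID on stdout even though its exit status is
# unhelpful, so callers simply capture the output (the device name below is
# a made-up example):
#   use_uuid="$(volume_uuid_from_special disk1s7)"
# ---------------------------------------------------------------------------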
# this sometimes clears immediately, and AFAIK clears
# within about 1s. diskutil info on an unmounted path
# fails in around 50-100ms and a match takes about
# 250-300ms. I suspect it's usually ~250-750ms
await_volume() {
# caution: this could, in theory, get stuck
until /usr/sbin/diskutil info "$NIX_ROOT" &>/dev/null; do
:
done
}
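# --- Illustrative sketch (not part of the installer) ----------------------
# A bounded variant of the wait above, for anyone uneasy about the "could,
# in theory, get stuck" caveat. The poll interval and limit are assumptions
# based on the timing notes; the installer itself loops without a limit.
await_volume_with_timeout_sketch() {
    local tries=0
    until /usr/sbin/diskutil info "$NIX_ROOT" &>/dev/null; do
        tries=$((tries + 1))
        if [ "$tries" -gt 300 ]; then
            return 1 # give up after roughly 30 seconds
        fi
        /bin/sleep 0.1
    done
}
# ---------------------------------------------------------------------------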
setup_volume() {
local use_special use_uuid profile_packages
task "Creating a Nix volume" >&2
use_special="${NIX_VOLUME_USE_SPECIAL:-$(create_volume)}"
_sudo "to ensure the Nix volume is not mounted" \
/usr/sbin/diskutil unmount force "$use_special" || true # might not be mounted
use_uuid=${NIX_VOLUME_USE_UUID:-$(volume_uuid_from_special "$use_special")}
setup_fstab "$use_uuid"
if should_encrypt_volume; then
encrypt_volume "$use_uuid" "$NIX_VOLUME_LABEL"
setup_volume_daemon "encrypted" "$use_uuid"
# TODO: might be able to save ~60ms by caching or setting
# this somewhere rather than re-checking here.
elif volume_encrypted "$use_special"; then
setup_volume_daemon "encrypted" "$use_uuid"
else
setup_volume_daemon "unencrypted" "$use_uuid"
fi
await_volume
if [ "$(/usr/sbin/diskutil info -plist "$NIX_ROOT" | xmllint --xpath "(/plist/dict/key[text()='GlobalPermissionsEnabled'])/following-sibling::*[1]" -)" = "<false/>" ]; then
_sudo "to set enableOwnership (enabling users to own files)" \
/usr/sbin/diskutil enableOwnership "$NIX_ROOT"
fi
# TODO: below is a vague kludge for now; I just don't know
# what if any safe action there is to take here. Also, the
# reminder isn't very helpful.
# I'm less sure where this belongs, but it also wants the volume mounted, pre-install
if type -p nix-env; then
profile_packages="$(nix-env --query --installed)"
# TODO: can probably do below faster w/ read
# intentionally unquoted string to eat whitespace in wc output
# shellcheck disable=SC2046,SC2059
if ! [ $(printf "$profile_packages" | /usr/bin/wc -l) = "0" ]; then
reminder <<EOF
Nix now supports only multi-user installs on Darwin/macOS, and your user's
Nix profile has some packages in it. These packages may obscure those in the
default profile, including the Nix this installer will add. You should
review these packages:
$profile_packages
EOF
fi
fi
}
setup_volume_daemon() {
local cmd_type="$1" # encrypted|unencrypted
local volume_uuid="$2"
if ! test_voldaemon; then
task "Configuring LaunchDaemon to mount '$NIX_VOLUME_LABEL'" >&2
_sudo "to install the Nix volume mounter" /usr/bin/ex "$NIX_VOLUME_MOUNTD_DEST" <<EOF
:a
$(generate_mount_daemon "$cmd_type" "$volume_uuid")
.
:x
EOF
# TODO: should probably alert the user if this is disabled?
_sudo "to launch the Nix volume mounter" \
launchctl bootstrap system "$NIX_VOLUME_MOUNTD_DEST" || true
# TODO: confirm whether kickstart is necessary?
# I feel a little superstitious, but it can guard
# against multiple problems (doesn't start, old
# version still running for some reason...)
_sudo "to launch the Nix volume mounter" \
launchctl kickstart -k system/org.nixos.darwin-store
fi
}
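# --- Illustrative sketch (not part of the installer) ----------------------
# After the bootstrap/kickstart above, the daemon can be inspected with
# launchctl, using the same label the kickstart call uses:
#   launchctl print system/org.nixos.darwin-store | head -n 20
# A non-zero exit status there means the LaunchDaemon never loaded.
# ---------------------------------------------------------------------------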
setup_darwin_volume() {
setup_synthetic_conf
setup_volume
}
if [ "$_CREATE_VOLUME_NO_MAIN" = 1 ]; then
if [ -n "$*" ]; then
"$@" # expose functions in case we want multiple routines?
fi
else
# no reason to pay for bash to process this
main() {
{
echo ""
echo " ------------------------------------------------------------------ "
echo " | This installer will create a volume for the nix store and |"
echo " | configure it to mount at $NIX_ROOT. Follow these steps to uninstall. |"
echo " ------------------------------------------------------------------ "
echo ""
echo " 1. Remove the entry from fstab using 'sudo /usr/sbin/vifs'"
echo " 2. Run 'sudo launchctl bootout system/org.nixos.darwin-store'"
echo " 3. Remove $NIX_VOLUME_MOUNTD_DEST"
echo " 4. Destroy the data volume using '/usr/sbin/diskutil apfs deleteVolume'"
echo " 5. Remove the 'nix' line from /etc/synthetic.conf (or the file)"
echo ""
} >&2
setup_darwin_volume
}
main "$@"
fi

@ -3,59 +3,110 @@
set -eu set -eu
set -o pipefail set -o pipefail
readonly PLIST_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist readonly NIX_DAEMON_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist
# create by default; set 0 to DIY, use a symlink, etc.
readonly NIX_VOLUME_CREATE=${NIX_VOLUME_CREATE:-1} # now default
NIX_FIRST_BUILD_UID="301" NIX_FIRST_BUILD_UID="301"
NIX_BUILD_USER_NAME_TEMPLATE="_nixbld%d" NIX_BUILD_USER_NAME_TEMPLATE="_nixbld%d"
dsclattr() { # caution: may update times on / if not run as normal non-root user
/usr/bin/dscl . -read "$1" \ read_only_root() {
| awk "/$2/ { print \$2 }" # this touch command ~should~ always produce an error
# as of this change I confirmed /usr/bin/touch emits:
# "touch: /: Operation not permitted" Monterey
# "touch: /: Read-only file system" Catalina+ and Big Sur
# "touch: /: Permission denied" Mojave
# (not matching prefix for compat w/ coreutils touch in case using
# an explicit path causes problems; its prefix differs)
case "$(/usr/bin/touch / 2>&1)" in
*"Read-only file system") # Catalina, Big Sur
return 0
;;
*"Operation not permitted") # Monterey
return 0
;;
*)
return 1
;;
esac
# Avoiding the slow semantic way to get this information (~330ms vs ~8ms)
# unless using touch causes problems. Just in case, that approach is:
# diskutil info -plist / | <find the Writable or WritableVolume keys>, i.e.
# diskutil info -plist / | xmllint --xpath "name(/plist/dict/key[text()='Writable']/following-sibling::*[1])" -
} }
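# --- Illustrative sketch (not part of the installer) ----------------------
# The slower plist-based detection mentioned in the comment above, wrapped
# up for side-by-side testing; the xpath is the one quoted there. "Writable"
# resolving to <false/> indicates a read-only root.
read_only_root_via_diskutil_sketch() {
    [ "$(/usr/sbin/diskutil info -plist / \
        | xmllint --xpath "name(/plist/dict/key[text()='Writable']/following-sibling::*[1])" -)" = "false" ]
}
# ---------------------------------------------------------------------------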
poly_validate_assumptions() { if read_only_root && [ "$NIX_VOLUME_CREATE" = 1 ]; then
if [ "$(uname -s)" != "Darwin" ]; then should_create_volume() { return 0; }
failure "This script is for use with macOS!" else
should_create_volume() { return 1; }
fi
# shellcheck source=./create-darwin-volume.sh
. "$EXTRACTED_NIX_PATH/create-darwin-volume.sh" "no-main"
dsclattr() {
/usr/bin/dscl . -read "$1" \
| /usr/bin/awk "/$2/ { print \$2 }"
}
test_nix_daemon_installed() {
test -e "$NIX_DAEMON_DEST"
}
poly_cure_artifacts() {
if should_create_volume; then
task "Fixing any leftover Nix volume state"
cat <<EOF
Before I try to install, I'll check for any existing Nix volume config
and ask for your permission to remove it (so that the installer can
start fresh). I'll also ask for permission to fix any issues I spot.
EOF
cure_volumes
remove_volume_artifacts
fi fi
} }
poly_service_installed_check() { poly_service_installed_check() {
[ -e "$PLIST_DEST" ] if should_create_volume; then
test_nix_daemon_installed || test_nix_volume_mountd_installed
else
test_nix_daemon_installed
fi
} }
poly_service_uninstall_directions() { poly_service_uninstall_directions() {
cat <<EOF echo "$1. Remove macOS-specific components:"
$1. Delete $PLIST_DEST if should_create_volume && test_nix_volume_mountd_installed; then
nix_volume_mountd_uninstall_directions
sudo launchctl unload $PLIST_DEST fi
sudo rm $PLIST_DEST if test_nix_daemon_installed; then
nix_daemon_uninstall_directions
EOF fi
} }
poly_service_setup_note() { poly_service_setup_note() {
cat <<EOF if should_create_volume; then
- load and start a LaunchDaemon (at $PLIST_DEST) for nix-daemon echo " - create a Nix volume and a LaunchDaemon to mount it"
fi
EOF echo " - create a LaunchDaemon (at $NIX_DAEMON_DEST) for nix-daemon"
echo ""
} }
poly_extra_try_me_commands(){ poly_extra_try_me_commands() {
: :
}
poly_extra_setup_instructions(){
:
} }
poly_configure_nix_daemon_service() { poly_configure_nix_daemon_service() {
task "Setting up the nix-daemon LaunchDaemon"
_sudo "to set up the nix-daemon as a LaunchDaemon" \ _sudo "to set up the nix-daemon as a LaunchDaemon" \
cp -f "/nix/var/nix/profiles/default$PLIST_DEST" "$PLIST_DEST" /bin/cp -f "/nix/var/nix/profiles/default$NIX_DAEMON_DEST" "$NIX_DAEMON_DEST"
_sudo "to load the LaunchDaemon plist for nix-daemon" \ _sudo "to load the LaunchDaemon plist for nix-daemon" \
launchctl load /Library/LaunchDaemons/org.nixos.nix-daemon.plist launchctl load /Library/LaunchDaemons/org.nixos.nix-daemon.plist
_sudo "to start the nix-daemon" \ _sudo "to start the nix-daemon" \
launchctl start org.nixos.nix-daemon launchctl kickstart -k system/org.nixos.nix-daemon
} }
poly_group_exists() { poly_group_exists() {
@ -96,6 +147,8 @@ poly_user_home_get() {
} }
poly_user_home_set() { poly_user_home_set() {
# This can trigger a permission prompt now:
# "Terminal" would like to administer your computer. Administration can include modifying passwords, networking, and system settings.
_sudo "in order to give $1 a safe home directory" \ _sudo "in order to give $1 a safe home directory" \
/usr/bin/dscl . -create "/Users/$1" "NFSHomeDirectory" "$2" /usr/bin/dscl . -create "/Users/$1" "NFSHomeDirectory" "$2"
} }
@ -121,7 +174,7 @@ poly_user_shell_set() {
poly_user_in_group_check() { poly_user_in_group_check() {
username=$1 username=$1
group=$2 group=$2
dseditgroup -o checkmember -m "$username" "$group" > /dev/null 2>&1 /usr/sbin/dseditgroup -o checkmember -m "$username" "$group" > /dev/null 2>&1
} }
poly_user_in_group_set() { poly_user_in_group_set() {
@ -151,3 +204,21 @@ poly_create_build_user() {
/usr/bin/dscl . create "/Users/$username" \ /usr/bin/dscl . create "/Users/$username" \
UniqueID "${uid}" UniqueID "${uid}"
} }
poly_prepare_to_install() {
if should_create_volume; then
header "Preparing a Nix volume"
# intentional indent below to match task indent
cat <<EOF
Nix traditionally stores its data in the root directory $NIX_ROOT, but
macOS now (starting in 10.15 Catalina) has a read-only root directory.
To support Nix, I will create a volume and configure macOS to mount it
at $NIX_ROOT.
EOF
setup_darwin_volume
fi
if [ "$(diskutil info -plist /nix | xmllint --xpath "(/plist/dict/key[text()='GlobalPermissionsEnabled'])/following-sibling::*[1]" -)" = "<false/>" ]; then
failure "This script needs a /nix volume with global permissions! This may require running sudo diskutil enableOwnership /nix."
fi
}

@ -33,7 +33,7 @@ NIX_BUILD_USER_NAME_TEMPLATE="nixbld%d"
readonly NIX_ROOT="/nix" readonly NIX_ROOT="/nix"
readonly NIX_EXTRA_CONF=${NIX_EXTRA_CONF:-} readonly NIX_EXTRA_CONF=${NIX_EXTRA_CONF:-}
readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshenv" "/etc/bash.bashrc" "/etc/zsh/zshenv") readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshrc" "/etc/bash.bashrc" "/etc/zsh/zshrc")
readonly PROFILE_BACKUP_SUFFIX=".backup-before-nix" readonly PROFILE_BACKUP_SUFFIX=".backup-before-nix"
readonly PROFILE_NIX_FILE="$NIX_ROOT/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" readonly PROFILE_NIX_FILE="$NIX_ROOT/var/nix/profiles/default/etc/profile.d/nix-daemon.sh"
@ -43,7 +43,7 @@ readonly NIX_INSTALLED_CACERT="@cacert@"
#readonly NIX_INSTALLED_CACERT="/nix/store/7dxhzymvy330i28ii676fl1pqwcahv2f-nss-cacert-3.49.2" #readonly NIX_INSTALLED_CACERT="/nix/store/7dxhzymvy330i28ii676fl1pqwcahv2f-nss-cacert-3.49.2"
readonly EXTRACTED_NIX_PATH="$(dirname "$0")" readonly EXTRACTED_NIX_PATH="$(dirname "$0")"
readonly ROOT_HOME=$(echo ~root) readonly ROOT_HOME=~root
if [ -t 0 ]; then if [ -t 0 ]; then
readonly IS_HEADLESS='no' readonly IS_HEADLESS='no'
@ -59,14 +59,19 @@ headless() {
fi fi
} }
contactme() { contact_us() {
echo "You can open an issue at https://github.com/nixos/nix/issues"
echo ""
echo "Or feel free to contact the team:"
echo " - Matrix: #nix:nixos.org"
echo " - IRC: in #nixos on irc.libera.chat"
echo " - twitter: @nixos_org"
echo " - forum: https://discourse.nixos.org"
}
get_help() {
echo "We'd love to help if you need it." echo "We'd love to help if you need it."
echo "" echo ""
echo "If you can, open an issue at https://github.com/nixos/nix/issues" contact_us
echo ""
echo "Or feel free to contact the team,"
echo " - on IRC #nixos on irc.freenode.net"
echo " - on twitter @nixos_org"
} }
uninstall_directions() { uninstall_directions() {
@ -102,7 +107,6 @@ $step. Delete the files Nix added to your system:
and that is it. and that is it.
EOF EOF
} }
nix_user_for_core() { nix_user_for_core() {
@ -170,7 +174,7 @@ failure() {
header "oh no!" header "oh no!"
_textout "$RED" "$@" _textout "$RED" "$@"
echo "" echo ""
_textout "$RED" "$(contactme)" _textout "$RED" "$(get_help)"
trap finish_cleanup EXIT trap finish_cleanup EXIT
exit 1 exit 1
} }
@ -201,6 +205,95 @@ ui_confirm() {
return 1 return 1
} }
printf -v _UNCHANGED_GRP_FMT "%b" $'\033[2m%='"$ESC" # "dim"
# bold+invert+red and bold+invert+green just for the +/- below
# red/green foreground for rest of the line
printf -v _OLD_LINE_FMT "%b" $'\033[1;7;31m-'"$ESC ${RED}%L${ESC}"
printf -v _NEW_LINE_FMT "%b" $'\033[1;7;32m+'"$ESC ${GREEN}%L${ESC}"
_diff() {
# simple colorized diff compatible w/ pre `--color` versions
diff --unchanged-group-format="$_UNCHANGED_GRP_FMT" --old-line-format="$_OLD_LINE_FMT" --new-line-format="$_NEW_LINE_FMT" --unchanged-line-format=" %L" "$@"
}
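# --- Illustrative usage sketch (not part of the installer) ----------------
# _diff is what confirm_edit below uses to preview a proposed change, e.g.
# (both paths hypothetical):
#   _diff /etc/zshrc "$SCRATCH/zshrc.without-nix"
# Unchanged lines render dim, removed lines red, added lines green, per the
# formats defined just above.
# ---------------------------------------------------------------------------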
confirm_rm() {
local path="$1"
if ui_confirm "Can I remove $path?"; then
_sudo "to remove $path" rm "$path"
fi
}
confirm_edit() {
local path="$1"
local edit_path="$2"
cat <<EOF
Nix isn't the only thing in $path,
but I think I know how to edit it out.
Here's the diff:
EOF
# could technically test the diff, but caller should do it
_diff "$path" "$edit_path"
if ui_confirm "Does the change above look right?"; then
_sudo "remove nix from $path" cp "$edit_path" "$path"
fi
}
_SERIOUS_BUSINESS="${RED}%s:${ESC} "
password_confirm() {
local do_something_consequential="$1"
if ui_confirm "Can I $do_something_consequential?"; then
# shellcheck disable=SC2059
sudo -kv --prompt="$(printf "${_SERIOUS_BUSINESS}" "Enter your password to $do_something_consequential")"
else
return 1
fi
}
# Support accumulating reminders over the course of a run and showing
# them at the end. An example where this helps: the installer changes
# something, but it won't work without a reboot. If you tell the user
# when you do it, they may miss it in the stream. The value of the
# setting isn't enough to decide whether to message because you only
# need to message if you *changed* it.
# reminders stored in array delimited by empty entry; if ! headless,
# user is asked to confirm after each delimiter.
_reminders=()
((_remind_num=1))
remind() {
# (( arithmetic expression ))
if (( _remind_num > 1 )); then
header "Reminders"
for line in "${_reminders[@]}"; do
echo "$line"
if ! headless && [ "${#line}" = 0 ]; then
if read -r -p "Press enter/return to acknowledge."; then
printf $'\033[A\33[2K\r'
fi
fi
done
fi
}
reminder() {
printf -v label "${BLUE}[ %d ]${ESC}" "$_remind_num"
_reminders+=("$label")
if [[ "$*" = "" ]]; then
while read -r line; do
_reminders+=("$line")
done
else
# this expands each arg to an array entry (and each entry will
# ultimately be a separate line in the output)
_reminders+=("$@")
fi
_reminders+=("")
((_remind_num++))
}
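# --- Illustrative usage sketch (not part of the installer) ----------------
# How the reminder/remind pair is meant to be used: queue notes as the run
# discovers them, then flush them once at the end (finish_success and
# finish_uninstall_success call remind):
#   reminder "Nix won't work in active shell sessions until you restart them."
#   # ... more work, possibly more reminder calls ...
#   remind   # prints a numbered "Reminders" section, pausing per note if interactive
# ---------------------------------------------------------------------------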
__sudo() { __sudo() {
local expl="$1" local expl="$1"
local cmd="$2" local cmd="$2"
@ -221,18 +314,18 @@ _sudo() {
local expl="$1" local expl="$1"
shift shift
if ! headless; then if ! headless; then
__sudo "$expl" "$*" __sudo "$expl" "$*" >&2
fi fi
sudo "$@" sudo "$@"
} }
readonly SCRATCH=$(mktemp -d -t tmp.XXXXXXXXXX) readonly SCRATCH=$(mktemp -d "${TMPDIR:-/tmp/}tmp.XXXXXXXXXX")
function finish_cleanup { finish_cleanup() {
rm -rf "$SCRATCH" rm -rf "$SCRATCH"
} }
function finish_fail { finish_fail() {
finish_cleanup finish_cleanup
failure <<EOF failure <<EOF
@ -244,45 +337,46 @@ EOF
} }
trap finish_fail EXIT trap finish_fail EXIT
channel_update_failed=0 finish_success() {
function finish_success {
finish_cleanup
ok "Alright! We're done!" ok "Alright! We're done!"
if [ "x$channel_update_failed" = x1 ]; then
echo ""
echo "But fetching the nixpkgs channel failed. (Are you offline?)"
echo "To try again later, run \"sudo -i nix-channel --update nixpkgs\"."
fi
cat <<EOF cat <<EOF
Before Nix will work in your existing shells, you'll need to close
them and open them again. Other than that, you should be ready to go.
Try it! Open a new terminal, and type: Try it! Open a new terminal, and type:
$(poly_extra_try_me_commands) $(poly_extra_try_me_commands)
$ nix-shell -p nix-info --run "nix-info -m" $ nix-shell -p nix-info --run "nix-info -m"
$(poly_extra_setup_instructions)
Thank you for using this installer. If you have any feedback, don't
hesitate:
$(contactme) Thank you for using this installer. If you have any feedback or need
help, don't hesitate:
$(contact_us)
EOF EOF
remind
finish_cleanup
} }
finish_uninstall_success() {
ok "Alright! Nix should be removed!"
cat <<EOF
If you spot anything this uninstaller missed or have feedback,
don't hesitate:
$(contact_us)
EOF
remind
finish_cleanup
}
remove_nix_artifacts() {
failure "Not implemented yet"
}
cure_artifacts() {
poly_cure_artifacts
# remove_nix_artifacts (LATER)
}
validate_starting_assumptions() { validate_starting_assumptions() {
poly_validate_assumptions
if [ $EUID -eq 0 ]; then
failure <<EOF
Please do not run this script with root privileges. We will call sudo
when we need to.
EOF
fi
if type nix-env 2> /dev/null >&2; then if type nix-env 2> /dev/null >&2; then
warning <<EOF warning <<EOF
Nix already appears to be installed. This installer may run into issues. Nix already appears to be installed. This installer may run into issues.
@ -293,19 +387,28 @@ EOF
fi fi
for profile_target in "${PROFILE_TARGETS[@]}"; do for profile_target in "${PROFILE_TARGETS[@]}"; do
# TODO: I think it would be good to accumulate a list of all
# of the copies so that people don't hit this 2 or 3x in
# a row for different files.
if [ -e "$profile_target$PROFILE_BACKUP_SUFFIX" ]; then if [ -e "$profile_target$PROFILE_BACKUP_SUFFIX" ]; then
# this backup process first released in Nix 2.1
failure <<EOF failure <<EOF
When this script runs, it backs up the current $profile_target to I back up shell profile/rc scripts before I add Nix to them.
$profile_target$PROFILE_BACKUP_SUFFIX. This backup file already exists, though. I need to back up $profile_target to $profile_target$PROFILE_BACKUP_SUFFIX,
but the latter already exists.
Please follow these instructions to clean up the old backup file: Here's how to clean up the old backup file:
1. Copy $profile_target and $profile_target$PROFILE_BACKUP_SUFFIX to another place, just 1. Back up (copy) $profile_target and $profile_target$PROFILE_BACKUP_SUFFIX
in case. to another location, just in case.
2. Take care to make sure that $profile_target$PROFILE_BACKUP_SUFFIX doesn't look like 2. Ensure $profile_target$PROFILE_BACKUP_SUFFIX does not have anything
it has anything nix-related in it. If it does, something is probably Nix-related in it. If it does, something is probably quite
quite wrong. Please open an issue or get in touch immediately. wrong. Please open an issue or get in touch immediately.
3. Once you confirm $profile_target is backed up and
$profile_target$PROFILE_BACKUP_SUFFIX doesn't mention Nix, run:
mv $profile_target$PROFILE_BACKUP_SUFFIX $profile_target
EOF EOF
fi fi
done done
@ -444,18 +547,46 @@ create_build_users() {
create_directories() { create_directories() {
# FIXME: remove all of this because it duplicates LocalStore::LocalStore(). # FIXME: remove all of this because it duplicates LocalStore::LocalStore().
task "Setting up the basic directory structure"
if [ -d "$NIX_ROOT" ]; then
# if /nix already exists, take ownership
#
# Caution: notes below are macOS-y
# This is a bit of a goldilocks zone for taking ownership
# if there are already files on the volume; the volume is
# now mounted, but we haven't added a bunch of new files
# this is probably a bit slow; I've been seeing 3.3-4s even
# when run promptly over a fresh single-user install. Please
# flag it if you're aware of a shortcut.
# `|| true`: .Trashes errors w/o full disk perm
# rumor per #4488 that macOS 11.2 may not have
# sbin on path, and that's where chown is, but
# since this bit is cross-platform:
# - first try with `command -vp` to try and find
# chown in the usual places
# - fall back on `command -v` which would find
# any chown on path
# if we don't find one, the command is already
# hiding behind || true, and the general state
# should be one the user can repair once they
# figure out where chown is...
local get_chr_own="$(command -vp chown)"
if [[ -z "$get_chr_own" ]]; then
get_chr_own="$(command -v chown)"
fi
_sudo "to take root ownership of existing Nix store files" \
"$get_chr_own" -R "root:$NIX_BUILD_GROUP_NAME" "$NIX_ROOT" || true
fi
_sudo "to make the basic directory structure of Nix (part 1)" \ _sudo "to make the basic directory structure of Nix (part 1)" \
mkdir -pv -m 0755 /nix /nix/var /nix/var/log /nix/var/log/nix /nix/var/log/nix/drvs /nix/var/nix{,/db,/gcroots,/profiles,/temproots,/userpool} /nix/var/nix/{gcroots,profiles}/per-user install -dv -m 0755 /nix /nix/var /nix/var/log /nix/var/log/nix /nix/var/log/nix/drvs /nix/var/nix{,/db,/gcroots,/profiles,/temproots,/userpool} /nix/var/nix/{gcroots,profiles}/per-user
_sudo "to make the basic directory structure of Nix (part 2)" \ _sudo "to make the basic directory structure of Nix (part 2)" \
mkdir -pv -m 1775 /nix/store install -dv -g "$NIX_BUILD_GROUP_NAME" -m 1775 /nix/store
_sudo "to make the basic directory structure of Nix (part 3)" \
chgrp "$NIX_BUILD_GROUP_NAME" /nix/store
_sudo "to place the default nix daemon configuration (part 1)" \ _sudo "to place the default nix daemon configuration (part 1)" \
mkdir -pv -m 0555 /etc/nix install -dv -m 0555 /etc/nix
} }
place_channel_configuration() { place_channel_configuration() {
@ -475,9 +606,9 @@ This installation tool will set up your computer with the Nix package
manager. This will happen in a few stages: manager. This will happen in a few stages:
1. Make sure your computer doesn't already have Nix. If it does, I 1. Make sure your computer doesn't already have Nix. If it does, I
will show you instructions on how to clean up your old one. will show you instructions on how to clean up your old install.
2. Show you what we are going to install and where. Then we will ask 2. Show you what I am going to install and where. Then I will ask
if you are ready to continue. if you are ready to continue.
3. Create the system users and groups that the Nix daemon uses to run 3. Create the system users and groups that the Nix daemon uses to run
@ -492,14 +623,14 @@ manager. This will happen in a few stages:
EOF EOF
if ui_confirm "Would you like to see a more detailed list of what we will do?"; then if ui_confirm "Would you like to see a more detailed list of what I will do?"; then
cat <<EOF cat <<EOF
We will: I will:
- make sure your computer doesn't already have Nix files - make sure your computer doesn't already have Nix files
(if it does, I will tell you how to clean them up.) (if it does, I will tell you how to clean them up.)
- create local users (see the list above for the users we'll make) - create local users (see the list above for the users I'll make)
- create a local group ($NIX_BUILD_GROUP_NAME) - create a local group ($NIX_BUILD_GROUP_NAME)
- install Nix in to $NIX_ROOT - install Nix in to $NIX_ROOT
- create a configuration file in /etc/nix - create a configuration file in /etc/nix
@ -534,7 +665,7 @@ run in a headless fashion, like this:
$ curl -L https://nixos.org/nix/install | sh $ curl -L https://nixos.org/nix/install | sh
or maybe in a CI pipeline. Because of that, we're going to skip the or maybe in a CI pipeline. Because of that, I'm going to skip the
verbose output in the interest of brevity. verbose output in the interest of brevity.
If you would like to If you would like to
@ -548,7 +679,7 @@ EOF
fi fi
cat <<EOF cat <<EOF
This script is going to call sudo a lot. Every time we do, it'll This script is going to call sudo a lot. Every time I do, it'll
output exactly what it'll do, and why. output exactly what it'll do, and why.
Just like this: Just like this:
@ -560,25 +691,29 @@ EOF
cat <<EOF cat <<EOF
This might look scary, but everything can be undone by running just a This might look scary, but everything can be undone by running just a
few commands. We used to ask you to confirm each time sudo ran, but it few commands. I used to ask you to confirm each time sudo ran, but it
was too many times. Instead, I'll just ask you this one time: was too many times. Instead, I'll just ask you this one time:
EOF EOF
if ui_confirm "Can we use sudo?"; then if ui_confirm "Can I use sudo?"; then
ok "Yay! Thanks! Let's get going!" ok "Yay! Thanks! Let's get going!"
else else
failure <<EOF failure <<EOF
That is okay, but we can't install. That is okay, but I can't install.
EOF EOF
fi fi
} }
install_from_extracted_nix() { install_from_extracted_nix() {
task "Installing Nix"
( (
cd "$EXTRACTED_NIX_PATH" cd "$EXTRACTED_NIX_PATH"
_sudo "to copy the basic Nix files to the new store at $NIX_ROOT/store" \ _sudo "to copy the basic Nix files to the new store at $NIX_ROOT/store" \
rsync -rlpt --chmod=-w ./store/* "$NIX_ROOT/store/" cp -RLp ./store/* "$NIX_ROOT/store/"
_sudo "to make the new store non-writable at $NIX_ROOT/store" \
chmod -R ugo-w "$NIX_ROOT/store/"
if [ -d "$NIX_INSTALLED_NIX" ]; then if [ -d "$NIX_INSTALLED_NIX" ]; then
echo " Alright! We have our first nix at $NIX_INSTALLED_NIX" echo " Alright! We have our first nix at $NIX_INSTALLED_NIX"
@ -589,9 +724,8 @@ $NIX_INSTALLED_NIX.
EOF EOF
fi fi
cat ./.reginfo \ _sudo "to load data for the first time in to the Nix Database" \
| _sudo "to load data for the first time in to the Nix Database" \ "$NIX_INSTALLED_NIX/bin/nix-store" --load-db < ./.reginfo
"$NIX_INSTALLED_NIX/bin/nix-store" --load-db
echo " Just finished getting the nix database ready." echo " Just finished getting the nix database ready."
) )
@ -610,6 +744,7 @@ EOF
} }
configure_shell_profile() { configure_shell_profile() {
task "Setting up shell profiles: ${PROFILE_TARGETS[*]}"
for profile_target in "${PROFILE_TARGETS[@]}"; do for profile_target in "${PROFILE_TARGETS[@]}"; do
if [ -e "$profile_target" ]; then if [ -e "$profile_target" ]; then
_sudo "to back up your current $profile_target to $profile_target$PROFILE_BACKUP_SUFFIX" \ _sudo "to back up your current $profile_target to $profile_target$PROFILE_BACKUP_SUFFIX" \
@ -629,14 +764,27 @@ configure_shell_profile() {
tee -a "$profile_target" tee -a "$profile_target"
fi fi
done done
# TODO: should we suggest '. $PROFILE_NIX_FILE'? It would get them on
# their way less disruptively, but a counter-argument is that they won't
# immediately notice if something didn't get set up right?
reminder "Nix won't work in active shell sessions until you restart them."
}
cert_in_store() {
# in a subshell
# - change into the cert-file dir
# - get the physical pwd
# and test if this path is in the Nix store
[[ "$(cd -- "$(dirname "$NIX_SSL_CERT_FILE")" && exec pwd -P)" == "$NIX_ROOT/store/"* ]]
} }
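# --- Illustrative note (not part of the installer) ------------------------
# cert_in_store resolves the directory of NIX_SSL_CERT_FILE to its physical
# path and asks whether it lives under $NIX_ROOT/store. For example (both
# paths hypothetical), a bundle reached through ~/.nix-profile that resolves
# into /nix/store/...-nss-cacert-.../etc/ssl/certs counts as in-store, while
# /etc/ssl/cert.pem does not; setup_default_profile below only reinstalls the
# bootstrap CA bundle when the configured one is store-backed or missing.
# ---------------------------------------------------------------------------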
setup_default_profile() { setup_default_profile() {
_sudo "to installing a bootstrapping Nix in to the default Profile" \ task "Setting up the default profile"
_sudo "to install a bootstrapping Nix in to the default profile" \
HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_NIX" HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_NIX"
if [ -z "${NIX_SSL_CERT_FILE:-}" ] || ! [ -f "${NIX_SSL_CERT_FILE:-}" ]; then if [ -z "${NIX_SSL_CERT_FILE:-}" ] || ! [ -f "${NIX_SSL_CERT_FILE:-}" ] || cert_in_store; then
_sudo "to installing a bootstrapping SSL certificate just for Nix in to the default Profile" \ _sudo "to install a bootstrapping SSL certificate just for Nix in to the default profile" \
HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_CACERT" HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_CACERT"
export NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt export NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt
fi fi
@ -645,9 +793,13 @@ setup_default_profile() {
# Have to explicitly pass NIX_SSL_CERT_FILE as part of the sudo call, # Have to explicitly pass NIX_SSL_CERT_FILE as part of the sudo call,
# otherwise it will be lost in environments where sudo doesn't pass # otherwise it will be lost in environments where sudo doesn't pass
# all the environment variables by default. # all the environment variables by default.
_sudo "to update the default channel in the default profile" \ if ! _sudo "to update the default channel in the default profile" \
HOME="$ROOT_HOME" NIX_SSL_CERT_FILE="$NIX_SSL_CERT_FILE" "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs \ HOME="$ROOT_HOME" NIX_SSL_CERT_FILE="$NIX_SSL_CERT_FILE" "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs; then
|| channel_update_failed=1 reminder <<EOF
I had trouble fetching the nixpkgs channel (are you offline?)
To try again later, run: sudo -i nix-channel --update nixpkgs
EOF
fi
fi fi
} }
@ -662,6 +814,17 @@ EOF
} }
main() { main() {
# TODO: I've moved this out of validate_starting_assumptions so we
# can fail faster in this case. Sourcing install-darwin... now runs
# `touch /` to detect Read-only root, but it could update times on
# pre-Catalina macOS if run as root user.
if [ "$EUID" -eq 0 ]; then
failure <<EOF
Please do not run this script with root privileges. I will call sudo
when I need to.
EOF
fi
if [ "$(uname -s)" = "Darwin" ]; then if [ "$(uname -s)" = "Darwin" ]; then
# shellcheck source=./install-darwin-multi-user.sh # shellcheck source=./install-darwin-multi-user.sh
. "$EXTRACTED_NIX_PATH/install-darwin-multi-user.sh" . "$EXTRACTED_NIX_PATH/install-darwin-multi-user.sh"
@ -675,17 +838,24 @@ main() {
welcome_to_nix welcome_to_nix
chat_about_sudo chat_about_sudo
cure_artifacts
# TODO: there's a tension between cure and validate. I moved
# the sudo/root check out of validate to the head of this func.
# Cure is *intended* to subsume the validate-and-abort approach,
# so it may eventually obsolete it.
validate_starting_assumptions validate_starting_assumptions
setup_report setup_report
if ! ui_confirm "Ready to continue?"; then if ! ui_confirm "Ready to continue?"; then
ok "Alright, no changes have been made :)" ok "Alright, no changes have been made :)"
contactme get_help
trap finish_cleanup EXIT trap finish_cleanup EXIT
exit 1 exit 1
fi fi
poly_prepare_to_install
create_build_group create_build_group
create_build_users create_build_users
create_directories create_directories
@ -695,6 +865,7 @@ main() {
configure_shell_profile configure_shell_profile
set +eu set +eu
# shellcheck disable=SC1091
. /etc/profile . /etc/profile
set -eu set -eu
@ -706,5 +877,20 @@ main() {
trap finish_success EXIT trap finish_success EXIT
} }
# set an empty initial arg for bare invocations in case we need to
# disambiguate someone directly invoking this later.
if [ "${#@}" = 0 ]; then
set ""
fi
main # ACTION for override
case "${1-}" in
# uninstall)
# shift
# uninstall "$@";;
# install == same as the no-arg condition for now (but, explicit)
""|install)
main;;
*) # holding space for future options (like uninstall + install?)
failure "install-multi-user: invalid argument";;
esac

@ -26,18 +26,9 @@ fi
# macOS support for 10.12.6 or higher # macOS support for 10.12.6 or higher
if [ "$(uname -s)" = "Darwin" ]; then if [ "$(uname -s)" = "Darwin" ]; then
IFS='.' read macos_major macos_minor macos_patch << EOF IFS='.' read -r macos_major macos_minor macos_patch << EOF
$(sw_vers -productVersion) $(sw_vers -productVersion)
EOF EOF
# TODO: this is a temporary speed-bump to keep people from naively installing Nix
# on macOS Big Sur (11.0+, 10.16+) until nixpkgs updates are ready for them.
# *Ideally* this is gone before next Nix release. If you're intentionally working on
# Nix + Big Sur, just comment out this block and be on your way :)
if [ "$macos_major" -gt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -gt 15 ]; }; then
echo "$0: nixpkgs isn't quite ready to support macOS $(sw_vers -productVersion) yet"
exit 1
fi
if [ "$macos_major" -lt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -lt 12 ]; } || { [ "$macos_minor" -eq 12 ] && [ "$macos_patch" -lt 6 ]; }; then if [ "$macos_major" -lt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -lt 12 ]; } || { [ "$macos_minor" -eq 12 ] && [ "$macos_patch" -lt 6 ]; }; then
# patch may not be present; command substitution for simplicity # patch may not be present; command substitution for simplicity
echo "$0: macOS $(sw_vers -productVersion) is not supported, upgrade to 10.12.6 or higher" echo "$0: macOS $(sw_vers -productVersion) is not supported, upgrade to 10.12.6 or higher"
@ -46,21 +37,40 @@ EOF
fi fi
# Determine if we could use the multi-user installer or not # Determine if we could use the multi-user installer or not
if [ "$(uname -s)" = "Darwin" ]; then if [ "$(uname -s)" = "Linux" ]; then
echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2
elif [ "$(uname -s)" = "Linux" ]; then
echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2 echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2
fi fi
INSTALL_MODE=no-daemon case "$(uname -s)" in
CREATE_DARWIN_VOLUME=0 "Darwin")
INSTALL_MODE=daemon;;
*)
INSTALL_MODE=no-daemon;;
esac
# space-separated string
ACTIONS=
# handle the command line flags # handle the command line flags
while [ $# -gt 0 ]; do while [ $# -gt 0 ]; do
case $1 in case $1 in
--daemon) --daemon)
INSTALL_MODE=daemon;; INSTALL_MODE=daemon
ACTIONS="${ACTIONS}install "
;;
--no-daemon) --no-daemon)
INSTALL_MODE=no-daemon;; if [ "$(uname -s)" = "Darwin" ]; then
printf '\e[1;31mError: --no-daemon installs are no longer supported on Darwin/macOS!\e[0m\n' >&2
exit 1
fi
INSTALL_MODE=no-daemon
# intentional tail space
ACTIONS="${ACTIONS}install "
;;
# --uninstall)
# # intentional tail space
# ACTIONS="${ACTIONS}uninstall "
# ;;
--no-channel-add) --no-channel-add)
export NIX_INSTALLER_NO_CHANNEL_ADD=1;; export NIX_INSTALLER_NO_CHANNEL_ADD=1;;
--daemon-user-count) --daemon-user-count)
@ -69,13 +79,18 @@ while [ $# -gt 0 ]; do
--no-modify-profile) --no-modify-profile)
NIX_INSTALLER_NO_MODIFY_PROFILE=1;; NIX_INSTALLER_NO_MODIFY_PROFILE=1;;
--darwin-use-unencrypted-nix-store-volume) --darwin-use-unencrypted-nix-store-volume)
CREATE_DARWIN_VOLUME=1;; {
echo "Warning: the flag --darwin-use-unencrypted-nix-store-volume"
echo " is no longer needed and will be removed in the future."
echo ""
} >&2;;
--nix-extra-conf-file) --nix-extra-conf-file)
export NIX_EXTRA_CONF="$(cat $2)" # shellcheck disable=SC2155
export NIX_EXTRA_CONF="$(cat "$2")"
shift;; shift;;
*) *)
( {
echo "Nix Installer [--daemon|--no-daemon] [--daemon-user-count INT] [--no-channel-add] [--no-modify-profile] [--darwin-use-unencrypted-nix-store-volume] [--nix-extra-conf-file FILE]" echo "Nix Installer [--daemon|--no-daemon] [--daemon-user-count INT] [--no-channel-add] [--no-modify-profile] [--nix-extra-conf-file FILE]"
echo "Choose installation method." echo "Choose installation method."
echo "" echo ""
@ -91,55 +106,25 @@ while [ $# -gt 0 ]; do
echo "" echo ""
echo " --no-channel-add: Don't add any channels. nixpkgs-unstable is installed by default." echo " --no-channel-add: Don't add any channels. nixpkgs-unstable is installed by default."
echo "" echo ""
echo " --no-modify-profile: Skip channel installation. When not provided nixpkgs-unstable" echo " --no-modify-profile: Don't modify the user profile to automatically load nix."
echo " is installed by default."
echo "" echo ""
echo " --daemon-user-count: Number of build users to create. Defaults to 32." echo " --daemon-user-count: Number of build users to create. Defaults to 32."
echo "" echo ""
echo " --nix-extra-conf-file: Path to nix.conf to prepend when installing /etc/nix.conf" echo " --nix-extra-conf-file: Path to nix.conf to prepend when installing /etc/nix/nix.conf"
echo "" echo ""
if [ -n "${INVOKED_FROM_INSTALL_IN:-}" ]; then if [ -n "${INVOKED_FROM_INSTALL_IN:-}" ]; then
echo " --tarball-url-prefix URL: Base URL to download the Nix tarball from." echo " --tarball-url-prefix URL: Base URL to download the Nix tarball from."
fi fi
) >&2 } >&2
# darwin and Catalina+
if [ "$(uname -s)" = "Darwin" ] && { [ "$macos_major" -gt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -gt 14 ]; }; }; then
(
echo " --darwin-use-unencrypted-nix-store-volume: Create an APFS volume for the Nix"
echo " store and mount it at /nix. This is the recommended way to create"
echo " /nix with a read-only / on macOS >=10.15."
echo " See: https://nixos.org/nix/manual/#sect-macos-installation"
echo ""
) >&2
fi
exit;; exit;;
esac esac
shift shift
done done
if [ "$(uname -s)" = "Darwin" ]; then
if [ "$CREATE_DARWIN_VOLUME" = 1 ]; then
printf '\e[1;31mCreating volume and mountpoint /nix.\e[0m\n'
"$self/create-darwin-volume.sh"
fi
writable="$(diskutil info -plist / | xmllint --xpath "name(/plist/dict/key[text()='Writable']/following-sibling::*[1])" -)"
if ! [ -e $dest ] && [ "$writable" = "false" ]; then
(
echo ""
echo "Installing on macOS >=10.15 requires relocating the store to an apfs volume."
echo "Use sh <(curl -L https://nixos.org/nix/install) --darwin-use-unencrypted-nix-store-volume or run the preparation steps manually."
echo "See https://nixos.org/nix/manual/#sect-macos-installation"
echo ""
) >&2
exit 1
fi
fi
if [ "$INSTALL_MODE" = "daemon" ]; then if [ "$INSTALL_MODE" = "daemon" ]; then
printf '\e[1;31mSwitching to the Multi-user Installer\e[0m\n' printf '\e[1;31mSwitching to the Multi-user Installer\e[0m\n'
exec "$self/install-multi-user" exec "$self/install-multi-user" $ACTIONS # let ACTIONS split
exit 0 exit 0
fi fi
@ -149,7 +134,7 @@ fi
echo "performing a single-user installation of Nix..." >&2 echo "performing a single-user installation of Nix..." >&2
if ! [ -e $dest ]; then if ! [ -e "$dest" ]; then
cmd="mkdir -m 0755 $dest && chown $USER $dest" cmd="mkdir -m 0755 $dest && chown $USER $dest"
echo "directory $dest does not exist; creating it by running '$cmd' using sudo" >&2 echo "directory $dest does not exist; creating it by running '$cmd' using sudo" >&2
if ! sudo sh -c "$cmd"; then if ! sudo sh -c "$cmd"; then
@ -158,12 +143,12 @@ if ! [ -e $dest ]; then
fi fi
fi fi
if ! [ -w $dest ]; then if ! [ -w "$dest" ]; then
echo "$0: directory $dest exists, but is not writable by you. This could indicate that another user has already performed a single-user installation of Nix on this system. If you wish to enable multi-user support see https://nixos.org/nix/manual/#ssec-multi-user. If you wish to continue with a single-user install for $USER please run 'chown -R $USER $dest' as root." >&2 echo "$0: directory $dest exists, but is not writable by you. This could indicate that another user has already performed a single-user installation of Nix on this system. If you wish to enable multi-user support see https://nixos.org/nix/manual/#ssec-multi-user. If you wish to continue with a single-user install for $USER please run 'chown -R $USER $dest' as root." >&2
exit 1 exit 1
fi fi
mkdir -p $dest/store mkdir -p "$dest/store"
printf "copying Nix to %s..." "${dest}/store" >&2 printf "copying Nix to %s..." "${dest}/store" >&2
# Insert a newline if no progress is shown. # Insert a newline if no progress is shown.
@ -194,6 +179,7 @@ if ! "$nix/bin/nix-store" --load-db < "$self/.reginfo"; then
exit 1 exit 1
fi fi
# shellcheck source=./nix-profile.sh.in
. "$nix/etc/profile.d/nix.sh" . "$nix/etc/profile.d/nix.sh"
if ! "$nix/bin/nix-env" -i "$nix"; then if ! "$nix/bin/nix-env" -i "$nix"; then
@ -203,17 +189,17 @@ fi
# Install an SSL certificate bundle. # Install an SSL certificate bundle.
if [ -z "$NIX_SSL_CERT_FILE" ] || ! [ -f "$NIX_SSL_CERT_FILE" ]; then if [ -z "$NIX_SSL_CERT_FILE" ] || ! [ -f "$NIX_SSL_CERT_FILE" ]; then
$nix/bin/nix-env -i "$cacert" "$nix/bin/nix-env" -i "$cacert"
export NIX_SSL_CERT_FILE="$HOME/.nix-profile/etc/ssl/certs/ca-bundle.crt" export NIX_SSL_CERT_FILE="$HOME/.nix-profile/etc/ssl/certs/ca-bundle.crt"
fi fi
# Subscribe the user to the Nixpkgs channel and fetch it. # Subscribe the user to the Nixpkgs channel and fetch it.
if [ -z "$NIX_INSTALLER_NO_CHANNEL_ADD" ]; then if [ -z "$NIX_INSTALLER_NO_CHANNEL_ADD" ]; then
if ! $nix/bin/nix-channel --list | grep -q "^nixpkgs "; then if ! "$nix/bin/nix-channel" --list | grep -q "^nixpkgs "; then
$nix/bin/nix-channel --add https://nixos.org/channels/nixpkgs-unstable "$nix/bin/nix-channel" --add https://nixos.org/channels/nixpkgs-unstable
fi fi
if [ -z "$_NIX_INSTALLER_TEST" ]; then if [ -z "$_NIX_INSTALLER_TEST" ]; then
if ! $nix/bin/nix-channel --update nixpkgs; then if ! "$nix/bin/nix-channel" --update nixpkgs; then
echo "Fetching the nixpkgs channel failed. (Are you offline?)" echo "Fetching the nixpkgs channel failed. (Are you offline?)"
echo "To try again later, run \"nix-channel --update nixpkgs\"." echo "To try again later, run \"nix-channel --update nixpkgs\"."
fi fi
@ -229,7 +215,7 @@ if [ -z "$NIX_INSTALLER_NO_MODIFY_PROFILE" ]; then
if [ -w "$fn" ]; then if [ -w "$fn" ]; then
if ! grep -q "$p" "$fn"; then if ! grep -q "$p" "$fn"; then
echo "modifying $fn..." >&2 echo "modifying $fn..." >&2
echo -e "\nif [ -e $p ]; then . $p; fi # added by Nix installer" >> "$fn" printf '\nif [ -e %s ]; then . %s; fi # added by Nix installer\n' "$p" "$p" >> "$fn"
fi fi
added=1 added=1
break break
@ -240,7 +226,7 @@ if [ -z "$NIX_INSTALLER_NO_MODIFY_PROFILE" ]; then
if [ -w "$fn" ]; then if [ -w "$fn" ]; then
if ! grep -q "$p" "$fn"; then if ! grep -q "$p" "$fn"; then
echo "modifying $fn..." >&2 echo "modifying $fn..." >&2
echo -e "\nif [ -e $p ]; then . $p; fi # added by Nix installer" >> "$fn" printf '\nif [ -e %s ]; then . %s; fi # added by Nix installer\n' "$p" "$p" >> "$fn"
fi fi
added=1 added=1
break break

@ -15,7 +15,7 @@ readonly SERVICE_OVERRIDE=${SERVICE_DEST}.d/override.conf
create_systemd_override() { create_systemd_override() {
header "Configuring proxy for the nix-daemon service" header "Configuring proxy for the nix-daemon service"
_sudo "create directory for systemd unit override" mkdir -p "$(dirname $SERVICE_OVERRIDE)" _sudo "create directory for systemd unit override" mkdir -p "$(dirname "$SERVICE_OVERRIDE")"
cat <<EOF | _sudo "create systemd unit override" tee "$SERVICE_OVERRIDE" cat <<EOF | _sudo "create systemd unit override" tee "$SERVICE_OVERRIDE"
[Service] [Service]
$1 $1
@ -41,10 +41,8 @@ handle_network_proxy() {
fi fi
} }
poly_validate_assumptions() { poly_cure_artifacts() {
if [ "$(uname -s)" != "Linux" ]; then :
failure "This script is for use with Linux!"
fi
} }
poly_service_installed_check() { poly_service_installed_check() {
@ -72,7 +70,7 @@ poly_service_setup_note() {
EOF EOF
} }
poly_extra_try_me_commands(){ poly_extra_try_me_commands() {
if [ -e /run/systemd/system ]; then if [ -e /run/systemd/system ]; then
: :
else else
@ -81,19 +79,10 @@ poly_extra_try_me_commands(){
EOF EOF
fi fi
} }
poly_extra_setup_instructions(){
if [ -e /run/systemd/system ]; then
:
else
cat <<EOF
Additionally, you may want to add nix-daemon to your init-system.
EOF
fi
}
poly_configure_nix_daemon_service() { poly_configure_nix_daemon_service() {
if [ -e /run/systemd/system ]; then if [ -e /run/systemd/system ]; then
task "Setting up the nix-daemon systemd service"
_sudo "to set up the nix-daemon service" \ _sudo "to set up the nix-daemon service" \
systemctl link "/nix/var/nix/profiles/default$SERVICE_SRC" systemctl link "/nix/var/nix/profiles/default$SERVICE_SRC"
@ -110,6 +99,8 @@ poly_configure_nix_daemon_service() {
_sudo "to start the nix-daemon.service" \ _sudo "to start the nix-daemon.service" \
systemctl restart nix-daemon.service systemctl restart nix-daemon.service
else
reminder "I don't support your init system yet; you may want to add nix-daemon manually."
fi fi
} }
@ -207,3 +198,7 @@ poly_create_build_user() {
--password "!" \ --password "!" \
"$username" "$username"
} }
poly_prepare_to_install() {
:
}

@ -40,21 +40,25 @@ case "$(uname -s).$(uname -m)" in
path=@tarballPath_aarch64-linux@ path=@tarballPath_aarch64-linux@
system=aarch64-linux system=aarch64-linux
;; ;;
Linux.armv6l_linux)
hash=@tarballHash_armv6l-linux@
path=@tarballPath_armv6l-linux@
system=armv6l-linux
;;
Linux.armv7l_linux)
hash=@tarballHash_armv7l-linux@
path=@tarballPath_armv7l-linux@
system=armv7l-linux
;;
Darwin.x86_64) Darwin.x86_64)
hash=@tarballHash_x86_64-darwin@ hash=@tarballHash_x86_64-darwin@
path=@tarballPath_x86_64-darwin@ path=@tarballPath_x86_64-darwin@
system=x86_64-darwin system=x86_64-darwin
;; ;;
Darwin.arm64|Darwin.aarch64) Darwin.arm64|Darwin.aarch64)
# check for Rosetta 2 support hash=@tarballHash_aarch64-darwin@
if ! [ -f /Library/Apple/System/Library/LaunchDaemons/com.apple.oahd.plist ]; then path=@tarballPath_aarch64-darwin@
oops "Rosetta 2 is not installed on this ARM64 macOS machine. Run softwareupdate --install-rosetta then restart installation" system=aarch64-darwin
fi
hash=@binaryTarball_x86_64-darwin@
path=@tarballPath_x86_64-darwin@
# eventually maybe: aarch64-darwin
system=x86_64-darwin
;; ;;
*) oops "sorry, there is no binary distribution of Nix for your platform";; *) oops "sorry, there is no binary distribution of Nix for your platform";;
esac esac
@ -72,14 +76,21 @@ fi
tarball=$tmpDir/nix-@nixVersion@-$system.tar.xz tarball=$tmpDir/nix-@nixVersion@-$system.tar.xz
require_util curl "download the binary tarball"
require_util tar "unpack the binary tarball" require_util tar "unpack the binary tarball"
if [ "$(uname -s)" != "Darwin" ]; then if [ "$(uname -s)" != "Darwin" ]; then
require_util xz "unpack the binary tarball" require_util xz "unpack the binary tarball"
fi fi
if command -v curl > /dev/null 2>&1; then
fetch() { curl -L "$1" -o "$2"; }
elif command -v wget > /dev/null 2>&1; then
fetch() { wget "$1" -O "$2"; }
else
oops "you don't have wget or curl installed, which I need to download the binary tarball"
fi
echo "downloading Nix @nixVersion@ binary tarball for $system from '$url' to '$tmpDir'..." echo "downloading Nix @nixVersion@ binary tarball for $system from '$url' to '$tmpDir'..."
curl -L "$url" -o "$tarball" || oops "failed to download '$url'" fetch "$url" "$tarball" || oops "failed to download '$url'"
if command -v sha256sum > /dev/null 2>&1; then if command -v sha256sum > /dev/null 2>&1; then
hash2="$(sha256sum -b "$tarball" | cut -c1-64)" hash2="$(sha256sum -b "$tarball" | cut -c1-64)"

@ -1,7 +1,5 @@
nix_noinst_scripts := \ nix_noinst_scripts := \
$(d)/nix-http-export.cgi \ $(d)/nix-profile.sh
$(d)/nix-profile.sh \
$(d)/nix-reduce-build
noinst-scripts += $(nix_noinst_scripts) noinst-scripts += $(nix_noinst_scripts)

@ -1,51 +0,0 @@
#! /bin/sh
export HOME=/tmp
export NIX_REMOTE=daemon
TMP_DIR="${TMP_DIR:-/tmp/nix-export}"
@coreutils@/mkdir -p "$TMP_DIR" || true
@coreutils@/chmod a+r "$TMP_DIR"
needed_path="?$QUERY_STRING"
needed_path="${needed_path#*[?&]needed_path=}"
needed_path="${needed_path%%&*}"
#needed_path="$(echo $needed_path | ./unhttp)"
needed_path="${needed_path//%2B/+}"
needed_path="${needed_path//%3D/=}"
echo needed_path: "$needed_path" >&2
NIX_STORE="${NIX_STORE_DIR:-/nix/store}"
echo NIX_STORE: "${NIX_STORE}" >&2
full_path="${NIX_STORE}"/"$needed_path"
if [ "$needed_path" != "${needed_path%.drv}" ]; then
echo "Status: 403 You should create the derivation file yourself"
echo "Content-Type: text/plain"
echo
echo "Refusing to disclose derivation contents"
exit
fi
if @bindir@/nix-store --check-validity "$full_path"; then
if ! [ -e nix-export/"$needed_path".nar.gz ]; then
@bindir@/nix-store --export "$full_path" | @gzip@ > "$TMP_DIR"/"$needed_path".nar.gz
@coreutils@/ln -fs "$TMP_DIR"/"$needed_path".nar.gz nix-export/"$needed_path".nar.gz
fi;
echo "Status: 301 Moved"
echo "Location: nix-export/"$needed_path".nar.gz"
echo
else
echo "Status: 404 No such path found"
echo "Content-Type: text/plain"
echo
echo "Path not found:"
echo "$needed_path"
echo "checked:"
echo "$full_path"
fi

@ -5,7 +5,7 @@ __ETC_PROFILE_NIX_SOURCED=1
export NIX_PROFILES="@localstatedir@/nix/profiles/default $HOME/.nix-profile" export NIX_PROFILES="@localstatedir@/nix/profiles/default $HOME/.nix-profile"
# Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work. # Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work.
if [ ! -z "${NIX_SSL_CERT_FILE:-}" ]; then if [ -n "${NIX_SSL_CERT_FILE:-}" ]; then
: # Allow users to override the NIX_SSL_CERT_FILE : # Allow users to override the NIX_SSL_CERT_FILE
elif [ -e /etc/ssl/certs/ca-certificates.crt ]; then # NixOS, Ubuntu, Debian, Gentoo, Arch elif [ -e /etc/ssl/certs/ca-certificates.crt ]; then # NixOS, Ubuntu, Debian, Gentoo, Arch
export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
@ -18,14 +18,14 @@ elif [ -e /etc/pki/tls/certs/ca-bundle.crt ]; then # Fedora, CentOS
else else
# Fall back to what is in the nix profiles, favouring whatever is defined last. # Fall back to what is in the nix profiles, favouring whatever is defined last.
check_nix_profiles() { check_nix_profiles() {
if [ "$ZSH_VERSION" ]; then if [ -n "$ZSH_VERSION" ]; then
# Zsh by default doesn't split words in unquoted parameter expansion. # Zsh by default doesn't split words in unquoted parameter expansion.
# Set local_options for these options to be reverted at the end of the function # Set local_options for these options to be reverted at the end of the function
# and shwordsplit to force splitting words in $NIX_PROFILES below. # and shwordsplit to force splitting words in $NIX_PROFILES below.
setopt local_options shwordsplit setopt local_options shwordsplit
fi fi
for i in $NIX_PROFILES; do for i in $NIX_PROFILES; do
if [ -e $i/etc/ssl/certs/ca-bundle.crt ]; then if [ -e "$i/etc/ssl/certs/ca-bundle.crt" ]; then
export NIX_SSL_CERT_FILE=$i/etc/ssl/certs/ca-bundle.crt export NIX_SSL_CERT_FILE=$i/etc/ssl/certs/ca-bundle.crt
fi fi
done done

@ -1,171 +0,0 @@
#! @bash@
WORKING_DIRECTORY=$(mktemp -d "${TMPDIR:-/tmp}"/nix-reduce-build-XXXXXX);
cd "$WORKING_DIRECTORY";
if test -z "$1" || test "a--help" = "a$1" ; then
echo 'nix-reduce-build (paths or Nix expressions) -- (package sources)' >&2
echo As in: >&2
echo nix-reduce-build /etc/nixos/nixos -- ssh://user@somewhere.nowhere.example.org >&2
echo nix-reduce-build /etc/nixos/nixos -- \\
echo " " \''http://somewhere.nowhere.example.org/nix/nix-http-export.cgi?needed_path='\' >&2
echo " store path name will be added into the end of the URL" >&2
echo nix-reduce-build /etc/nixos/nixos -- file://home/user/nar/ >&2
echo " that should be a directory where gzipped 'nix-store --export' ">&2
echo " files are located (they should have .nar.gz extension)" >&2
echo " Or all together: " >&2
echo -e nix-reduce-build /expr.nix /e2.nix -- \\\\\\\n\
" ssh://a@b.example.com http://n.example.com/get-nar?q= file://nar/" >&2
echo " Also supports best-effort local builds of failing expression set:" >&2
echo "nix-reduce-build /e.nix -- nix-daemon:// nix-self://" >&2
echo " nix-daemon:// builds using daemon"
echo " nix-self:// builds directly using nix-store from current installation" >&2
echo " nix-daemon-fixed:// and nix-self-fixed:// do the same, but only for" >&2;
echo "derivations with specified output hash (sha256, sha1 or md5)." >&2
echo " nix-daemon-substitute:// and nix-self-substitute:// try to substitute" >&2;
echo "maximum amount of paths" >&2;
echo " nix-daemon-build:// and nix-self-build:// try to build (not substitute)" >&2;
echo "maximum amount of paths" >&2;
echo " If no package sources are specified, required paths are listed." >&2;
exit;
fi;
while ! test "$1" = "--" || test "$1" = "" ; do
echo "$1" >> initial; >&2
shift;
done
shift;
echo Will work on $(cat initial | wc -l) targets. >&2
while read ; do
case "$REPLY" in
${NIX_STORE_DIR:-/nix/store}/*)
echo "$REPLY" >> paths; >&2
;;
*)
(
IFS=: ;
nix-instantiate $REPLY >> paths;
);
;;
esac;
done < initial;
echo Proceeding $(cat paths | wc -l) paths. >&2
while read; do
case "$REPLY" in
*.drv)
echo "$REPLY" >> derivers; >&2
;;
*)
nix-store --query --deriver "$REPLY" >>derivers;
;;
esac;
done < paths;
echo Found $(cat derivers | wc -l) derivers. >&2
cat derivers | xargs nix-store --query -R > derivers-closure;
echo Proceeding at most $(cat derivers-closure | wc -l) derivers. >&2
cat derivers-closure | egrep '[.]drv$' | xargs nix-store --query --outputs > wanted-paths;
cat derivers-closure | egrep -v '[.]drv$' >> wanted-paths;
echo Prepared $(cat wanted-paths | wc -l) paths to get. >&2
cat wanted-paths | xargs nix-store --check-validity --print-invalid > needed-paths;
echo We need $(cat needed-paths | wc -l) paths. >&2
egrep '[.]drv$' derivers-closure > critical-derivers;
if test -z "$1" ; then
cat needed-paths;
fi;
refresh_critical_derivers() {
echo "Finding needed derivers..." >&2;
cat critical-derivers | while read; do
if ! (nix-store --query --outputs "$REPLY" | xargs nix-store --check-validity &> /dev/null;); then
echo "$REPLY";
fi;
done > new-critical-derivers;
mv new-critical-derivers critical-derivers;
echo The needed paths are realized by $(cat critical-derivers | wc -l) derivers. >&2
}
build_here() {
cat critical-derivers | while read; do
echo "Realising $REPLY using nix-daemon" >&2
@bindir@/nix-store -r "${REPLY}"
done;
}
try_to_substitute(){
cat needed-paths | while read ; do
echo "Building $REPLY using nix-daemon" >&2
@bindir@/nix-store -r "${NIX_STORE_DIR:-/nix/store}/${REPLY##*/}"
done;
}
for i in "$@"; do
sshHost="${i#ssh://}";
httpHost="${i#http://}";
httpsHost="${i#https://}";
filePath="${i#file:/}";
if [ "$i" != "$sshHost" ]; then
cat needed-paths | while read; do
echo "Getting $REPLY and its closure over ssh" >&2
nix-copy-closure --from "$sshHost" --gzip "$REPLY" </dev/null || true;
done;
elif [ "$i" != "$httpHost" ] || [ "$i" != "$httpsHost" ]; then
cat needed-paths | while read; do
echo "Getting $REPLY over http/https" >&2
curl ${BAD_CERTIFICATE:+-k} -L "$i${REPLY##*/}" | gunzip | nix-store --import;
done;
elif [ "$i" != "$filePath" ] ; then
cat needed-paths | while read; do
echo "Installing $REPLY from file" >&2
gunzip < "$filePath/${REPLY##*/}".nar.gz | nix-store --import;
done;
elif [ "$i" = "nix-daemon://" ] ; then
NIX_REMOTE=daemon try_to_substitute;
refresh_critical_derivers;
NIX_REMOTE=daemon build_here;
elif [ "$i" = "nix-self://" ] ; then
NIX_REMOTE= try_to_substitute;
refresh_critical_derivers;
NIX_REMOTE= build_here;
elif [ "$i" = "nix-daemon-fixed://" ] ; then
refresh_critical_derivers;
cat critical-derivers | while read; do
if egrep '"(md5|sha1|sha256)"' "$REPLY" &>/dev/null; then
echo "Realising $REPLY using nix-daemon" >&2
NIX_REMOTE=daemon @bindir@/nix-store -r "${REPLY}"
fi;
done;
elif [ "$i" = "nix-self-fixed://" ] ; then
refresh_critical_derivers;
cat critical-derivers | while read; do
if egrep '"(md5|sha1|sha256)"' "$REPLY" &>/dev/null; then
echo "Realising $REPLY using direct Nix build" >&2
NIX_REMOTE= @bindir@/nix-store -r "${REPLY}"
fi;
done;
elif [ "$i" = "nix-daemon-substitute://" ] ; then
NIX_REMOTE=daemon try_to_substitute;
elif [ "$i" = "nix-self-substitute://" ] ; then
NIX_REMOTE= try_to_substitute;
elif [ "$i" = "nix-daemon-build://" ] ; then
refresh_critical_derivers;
NIX_REMOTE=daemon build_here;
elif [ "$i" = "nix-self-build://" ] ; then
refresh_critical_derivers;
NIX_REMOTE= build_here;
fi;
mv needed-paths wanted-paths;
cat wanted-paths | xargs nix-store --check-validity --print-invalid > needed-paths;
echo We still need $(cat needed-paths | wc -l) paths. >&2
done;
cd /
rm -r "$WORKING_DIRECTORY"


@ -3,7 +3,7 @@
set -e set -e
script=$(nix-build -A outputs.hydraJobs.installerScriptForGHA --no-out-link) script=$(nix-build -A outputs.hydraJobs.installerScriptForGHA --no-out-link)
installerHash=$(echo $script | cut -b12-43 -) installerHash=$(echo "$script" | cut -b12-43 -)
installerURL=https://$CACHIX_NAME.cachix.org/serve/$installerHash/install installerURL=https://$CACHIX_NAME.cachix.org/serve/$installerHash/install
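
In the installer-test hunk above, `cut -b12-43` slices bytes 12-43 out of the built store path: "/nix/store/" is eleven characters, so those 32 bytes are exactly the base-32 hash that then names the script on Cachix. The same slice written out in C++ as a stand-alone sketch (the store path below is invented for the example):

#include <iostream>
#include <string>

int main()
{
    // "/nix/store/" is 11 characters, so the 32-character hash occupies
    // bytes 12-43 in cut's 1-indexed terms, i.e. substr(11, 32) here.
    std::string script = "/nix/store/abcdefghijklmnopqrstuvwxyz012345-installer-script";
    std::string installerHash = script.substr(11, 32);
    std::cout << installerHash << "\n";   // prints abcdefghijklmnopqrstuvwxyz012345
}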


@ -18,6 +18,7 @@
#include "derivations.hh" #include "derivations.hh"
#include "local-store.hh" #include "local-store.hh"
#include "legacy.hh" #include "legacy.hh"
#include "experimental-features.hh"
using namespace nix; using namespace nix;
using std::cin; using std::cin;
@ -130,11 +131,14 @@ static int main_build_remote(int argc, char * * argv)
for (auto & m : machines) { for (auto & m : machines) {
debug("considering building on remote machine '%s'", m.storeUri); debug("considering building on remote machine '%s'", m.storeUri);
if (m.enabled && std::find(m.systemTypes.begin(), if (m.enabled
m.systemTypes.end(), && (neededSystem == "builtin"
neededSystem) != m.systemTypes.end() && || std::find(m.systemTypes.begin(),
m.systemTypes.end(),
neededSystem) != m.systemTypes.end()) &&
m.allSupported(requiredFeatures) && m.allSupported(requiredFeatures) &&
m.mandatoryMet(requiredFeatures)) { m.mandatoryMet(requiredFeatures))
{
rightType = true; rightType = true;
AutoCloseFD free; AutoCloseFD free;
uint64_t load = 0; uint64_t load = 0;
@ -270,14 +274,23 @@ connected:
{ {
Activity act(*logger, lvlTalkative, actUnknown, fmt("copying dependencies to '%s'", storeUri)); Activity act(*logger, lvlTalkative, actUnknown, fmt("copying dependencies to '%s'", storeUri));
copyPaths(store, ref<Store>(sshStore), store->parseStorePathSet(inputs), NoRepair, NoCheckSigs, substitute); copyPaths(*store, *sshStore, store->parseStorePathSet(inputs), NoRepair, NoCheckSigs, substitute);
} }
uploadLock = -1; uploadLock = -1;
auto drv = store->readDerivation(*drvPath); auto drv = store->readDerivation(*drvPath);
auto outputHashes = staticOutputHashes(*store, drv); auto outputHashes = staticOutputHashes(*store, drv);
drv.inputSrcs = store->parseStorePathSet(inputs);
    // Hijack the input paths of the derivation to include all the paths
    // that come from the `inputDrvs` set.
    // We don't do that for derivations whose `inputDrvs` is empty,
    // because
    // 1. It's not needed
    // 2. Changing the `inputSrcs` set changes the associated output ids,
    //    which breaks CA derivations
if (!drv.inputDrvs.empty())
drv.inputSrcs = store->parseStorePathSet(inputs);
auto result = sshStore->buildDerivation(*drvPath, drv); auto result = sshStore->buildDerivation(*drvPath, drv);
@ -286,7 +299,7 @@ connected:
std::set<Realisation> missingRealisations; std::set<Realisation> missingRealisations;
StorePathSet missingPaths; StorePathSet missingPaths;
if (settings.isExperimentalFeatureEnabled("ca-derivations") && !derivationHasKnownOutputPaths(drv.type())) { if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations) && !derivationHasKnownOutputPaths(drv.type())) {
for (auto & outputName : wantedOutputs) { for (auto & outputName : wantedOutputs) {
auto thisOutputHash = outputHashes.at(outputName); auto thisOutputHash = outputHashes.at(outputName);
auto thisOutputId = DrvOutput{ thisOutputHash, outputName }; auto thisOutputId = DrvOutput{ thisOutputHash, outputName };
@ -312,13 +325,13 @@ connected:
if (auto localStore = store.dynamic_pointer_cast<LocalStore>()) if (auto localStore = store.dynamic_pointer_cast<LocalStore>())
for (auto & path : missingPaths) for (auto & path : missingPaths)
localStore->locksHeld.insert(store->printStorePath(path)); /* FIXME: ugly */ localStore->locksHeld.insert(store->printStorePath(path)); /* FIXME: ugly */
copyPaths(ref<Store>(sshStore), store, missingPaths, NoRepair, NoCheckSigs, NoSubstitute); copyPaths(*sshStore, *store, missingPaths, NoRepair, NoCheckSigs, NoSubstitute);
} }
// XXX: Should be done as part of `copyPaths` // XXX: Should be done as part of `copyPaths`
for (auto & realisation : missingRealisations) { for (auto & realisation : missingRealisations) {
// Should hold, because if the feature isn't enabled the set // Should hold, because if the feature isn't enabled the set
// of missing realisations should be empty // of missing realisations should be empty
settings.requireExperimentalFeature("ca-derivations"); settings.requireExperimentalFeature(Xp::CaDerivations);
store->registerDrvOutput(realisation); store->registerDrvOutput(realisation);
} }
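
The machine-selection hunk at the top of this file's diff keeps the checks `m.allSupported(requiredFeatures)` and `m.mandatoryMet(requiredFeatures)`. Both read as plain set-inclusion tests; the sketch below shows that reading, with the caveat that the exact semantics are inferred rather than taken from this diff, and the feature names are invented:

#include <algorithm>
#include <iostream>
#include <set>
#include <string>

using StringSet = std::set<std::string>;

// Every feature the build requires must be offered by the builder.
bool allSupported(const StringSet & supported, const StringSet & required)
{
    return std::includes(supported.begin(), supported.end(),
                         required.begin(), required.end());
}

// Every feature the builder marks as mandatory must have been requested.
bool mandatoryMet(const StringSet & mandatory, const StringSet & required)
{
    return std::includes(required.begin(), required.end(),
                         mandatory.begin(), mandatory.end());
}

int main()
{
    StringSet supported = {"kvm", "big-parallel"};
    StringSet mandatory = {"big-parallel"};      // this builder only wants big builds
    StringSet required  = {"big-parallel"};      // what the derivation asks for

    std::cout << std::boolalpha
              << allSupported(supported, required) << " "
              << mandatoryMet(mandatory, required) << "\n";   // true true
}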


@ -95,8 +95,21 @@ EvalCommand::~EvalCommand()
evalState->printStats(); evalState->printStats();
} }
ref<Store> EvalCommand::getEvalStore()
{
if (!evalStore)
evalStore = evalStoreUrl ? openStore(*evalStoreUrl) : getStore();
return ref<Store>(evalStore);
}
RealisedPathsCommand::RealisedPathsCommand(bool recursive) ref<EvalState> EvalCommand::getEvalState()
{
if (!evalState)
evalState = std::make_shared<EvalState>(searchPath, getEvalStore(), getStore());
return ref<EvalState>(evalState);
}
BuiltPathsCommand::BuiltPathsCommand(bool recursive)
: recursive(recursive) : recursive(recursive)
{ {
if (recursive) if (recursive)
@ -123,44 +136,53 @@ RealisedPathsCommand::RealisedPathsCommand(bool recursive)
}); });
} }
void RealisedPathsCommand::run(ref<Store> store) void BuiltPathsCommand::run(ref<Store> store)
{ {
std::vector<RealisedPath> paths; BuiltPaths paths;
if (all) { if (all) {
if (installables.size()) if (installables.size())
throw UsageError("'--all' does not expect arguments"); throw UsageError("'--all' does not expect arguments");
// XXX: Only uses opaque paths, ignores all the realisations // XXX: Only uses opaque paths, ignores all the realisations
for (auto & p : store->queryAllValidPaths()) for (auto & p : store->queryAllValidPaths())
paths.push_back(p); paths.push_back(BuiltPath::Opaque{p});
} else { } else {
auto pathSet = toRealisedPaths(store, realiseMode, operateOn, installables); paths = toBuiltPaths(getEvalStore(), store, realiseMode, operateOn, installables);
if (recursive) { if (recursive) {
auto roots = std::move(pathSet); // XXX: This only computes the store path closure, ignoring
pathSet = {}; // intermediate realisations
RealisedPath::closure(*store, roots, pathSet); StorePathSet pathsRoots, pathsClosure;
for (auto & root : paths) {
auto rootFromThis = root.outPaths();
pathsRoots.insert(rootFromThis.begin(), rootFromThis.end());
}
store->computeFSClosure(pathsRoots, pathsClosure);
for (auto & path : pathsClosure)
paths.push_back(BuiltPath::Opaque{path});
} }
for (auto & path : pathSet)
paths.push_back(path);
} }
run(store, std::move(paths)); run(store, std::move(paths));
} }
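
`BuiltPathsCommand::run` above handles `--recursive` by collecting every output path as a root and asking the store for the filesystem closure, i.e. everything reachable through the references graph. A stand-alone sketch of that reachability walk (the reference map and path names are invented; the real `computeFSClosure` works on `StorePath`s inside the store):

#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

using Path = std::string;

// Reachability over a references graph: start from the roots and keep adding
// the references of everything already reached.
std::set<Path> closure(const std::map<Path, std::set<Path>> & refs, const std::set<Path> & roots)
{
    std::set<Path> seen = roots;
    std::vector<Path> todo(roots.begin(), roots.end());
    while (!todo.empty()) {
        Path p = todo.back();
        todo.pop_back();
        auto i = refs.find(p);
        if (i == refs.end()) continue;
        for (auto & ref : i->second)
            if (seen.insert(ref).second)   // first time we see this path
                todo.push_back(ref);
    }
    return seen;
}

int main()
{
    std::map<Path, std::set<Path>> refs = {
        {"app",    {"libfoo", "glibc"}},
        {"libfoo", {"glibc"}},
        {"glibc",  {}},
    };
    for (auto & p : closure(refs, {"app"}))
        std::cout << p << "\n";
}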
StorePathsCommand::StorePathsCommand(bool recursive) StorePathsCommand::StorePathsCommand(bool recursive)
: RealisedPathsCommand(recursive) : BuiltPathsCommand(recursive)
{ {
} }
void StorePathsCommand::run(ref<Store> store, std::vector<RealisedPath> paths) void StorePathsCommand::run(ref<Store> store, BuiltPaths && paths)
{ {
StorePaths storePaths; StorePathSet storePaths;
for (auto & p : paths) for (auto & builtPath : paths)
storePaths.push_back(p.path()); for (auto & p : builtPath.outPaths())
storePaths.insert(p);
run(store, std::move(storePaths)); auto sorted = store->topoSortPaths(storePaths);
std::reverse(sorted.begin(), sorted.end());
run(store, std::move(sorted));
} }
void StorePathCommand::run(ref<Store> store, std::vector<StorePath> storePaths) void StorePathCommand::run(ref<Store> store, std::vector<StorePath> && storePaths)
{ {
if (storePaths.size() != 1) if (storePaths.size() != 1)
throw UsageError("this command requires exactly one store path"); throw UsageError("this command requires exactly one store path");
@ -204,7 +226,7 @@ void MixProfile::updateProfile(const StorePath & storePath)
profile2, storePath)); profile2, storePath));
} }
void MixProfile::updateProfile(const DerivedPathsWithHints & buildables) void MixProfile::updateProfile(const BuiltPaths & buildables)
{ {
if (!profile) return; if (!profile) return;
@ -212,22 +234,19 @@ void MixProfile::updateProfile(const DerivedPathsWithHints & buildables)
for (auto & buildable : buildables) { for (auto & buildable : buildables) {
std::visit(overloaded { std::visit(overloaded {
[&](DerivedPathWithHints::Opaque bo) { [&](const BuiltPath::Opaque & bo) {
result.push_back(bo.path); result.push_back(bo.path);
}, },
[&](DerivedPathWithHints::Built bfd) { [&](const BuiltPath::Built & bfd) {
for (auto & output : bfd.outputs) { for (auto & output : bfd.outputs) {
/* Output path should be known because we just tried to result.push_back(output.second);
build it. */
assert(output.second);
result.push_back(*output.second);
} }
}, },
}, buildable.raw()); }, buildable.raw());
} }
if (result.size() != 1) if (result.size() != 1)
throw Error("'--profile' requires that the arguments produce a single store path, but there are %d", result.size()); throw UsageError("'--profile' requires that the arguments produce a single store path, but there are %d", result.size());
updateProfile(result[0]); updateProfile(result[0]);
} }
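
`MixProfile::updateProfile` above, like several libcmd hunks later in this commit, branches on the `BuiltPath`/`DerivedPath` variant with `std::visit` plus an `overloaded` helper. That helper is the standard C++17 overload-set idiom; a minimal, self-contained version is below (the `Opaque`/`Built` structs are simplified stand-ins, not Nix's actual types):

#include <iostream>
#include <string>
#include <variant>

// The two lines below are the whole idiom: inherit the call operators of all
// the lambdas passed in, so std::visit can pick the right one per alternative.
template<typename... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template<typename... Ts> overloaded(Ts...) -> overloaded<Ts...>;

// Simplified stand-ins for the Opaque/Built alternatives.
struct Opaque { std::string path; };
struct Built  { std::string drvPath; };

int main()
{
    std::variant<Opaque, Built> p = Built{"/nix/store/...-foo.drv"};

    std::visit(overloaded {
        [](const Opaque & o) { std::cout << "opaque path: " << o.path << "\n"; },
        [](const Built & b)  { std::cout << "built from:  " << b.drvPath << "\n"; },
    }, p);
}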


@ -47,13 +47,18 @@ struct EvalCommand : virtual StoreCommand, MixEvalArgs
{ {
bool startReplOnEvalErrors = false; bool startReplOnEvalErrors = false;
ref<EvalState> getEvalState();
EvalCommand(); EvalCommand();
std::shared_ptr<EvalState> evalState;
~EvalCommand(); ~EvalCommand();
ref<Store> getEvalStore();
ref<EvalState> getEvalState();
private:
std::shared_ptr<Store> evalStore;
std::shared_ptr<EvalState> evalState;
}; };
struct MixFlakeOptions : virtual Args, EvalCommand struct MixFlakeOptions : virtual Args, EvalCommand
@ -105,6 +110,8 @@ enum class Realise {
exists. */ exists. */
Derivation, Derivation,
/* Evaluate in dry-run mode. Postcondition: nothing. */ /* Evaluate in dry-run mode. Postcondition: nothing. */
// FIXME: currently unused, but could be revived if we can
// evaluate derivations in-memory.
Nothing Nothing
}; };
@ -147,7 +154,7 @@ private:
}; };
/* A command that operates on zero or more store paths. */ /* A command that operates on zero or more store paths. */
struct RealisedPathsCommand : public InstallablesCommand struct BuiltPathsCommand : public InstallablesCommand
{ {
private: private:
@ -160,26 +167,26 @@ protected:
public: public:
RealisedPathsCommand(bool recursive = false); BuiltPathsCommand(bool recursive = false);
using StoreCommand::run; using StoreCommand::run;
virtual void run(ref<Store> store, std::vector<RealisedPath> paths) = 0; virtual void run(ref<Store> store, BuiltPaths && paths) = 0;
void run(ref<Store> store) override; void run(ref<Store> store) override;
bool useDefaultInstallables() override { return !all; } bool useDefaultInstallables() override { return !all; }
}; };
struct StorePathsCommand : public RealisedPathsCommand struct StorePathsCommand : public BuiltPathsCommand
{ {
StorePathsCommand(bool recursive = false); StorePathsCommand(bool recursive = false);
using RealisedPathsCommand::run; using BuiltPathsCommand::run;
virtual void run(ref<Store> store, std::vector<StorePath> storePaths) = 0; virtual void run(ref<Store> store, std::vector<StorePath> && storePaths) = 0;
void run(ref<Store> store, std::vector<RealisedPath> paths) override; void run(ref<Store> store, BuiltPaths && paths) override;
}; };
/* A command that operates on exactly one store path. */ /* A command that operates on exactly one store path. */
@ -189,7 +196,7 @@ struct StorePathCommand : public StorePathsCommand
virtual void run(ref<Store> store, const StorePath & storePath) = 0; virtual void run(ref<Store> store, const StorePath & storePath) = 0;
void run(ref<Store> store, std::vector<StorePath> storePaths) override; void run(ref<Store> store, std::vector<StorePath> && storePaths) override;
}; };
/* A helper class for registering commands globally. */ /* A helper class for registering commands globally. */
@ -220,26 +227,37 @@ static RegisterCommand registerCommand2(std::vector<std::string> && name)
return RegisterCommand(std::move(name), [](){ return make_ref<T>(); }); return RegisterCommand(std::move(name), [](){ return make_ref<T>(); });
} }
DerivedPathsWithHints build(ref<Store> store, Realise mode, BuiltPaths build(
std::vector<std::shared_ptr<Installable>> installables, BuildMode bMode = bmNormal); ref<Store> evalStore,
ref<Store> store, Realise mode,
const std::vector<std::shared_ptr<Installable>> & installables,
BuildMode bMode = bmNormal);
std::set<StorePath> toStorePaths(ref<Store> store, std::set<StorePath> toStorePaths(
Realise mode, OperateOn operateOn, ref<Store> evalStore,
std::vector<std::shared_ptr<Installable>> installables);
StorePath toStorePath(ref<Store> store,
Realise mode, OperateOn operateOn,
std::shared_ptr<Installable> installable);
std::set<StorePath> toDerivations(ref<Store> store,
std::vector<std::shared_ptr<Installable>> installables,
bool useDeriver = false);
std::set<RealisedPath> toRealisedPaths(
ref<Store> store, ref<Store> store,
Realise mode, Realise mode,
OperateOn operateOn, OperateOn operateOn,
std::vector<std::shared_ptr<Installable>> installables); const std::vector<std::shared_ptr<Installable>> & installables);
StorePath toStorePath(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
OperateOn operateOn,
std::shared_ptr<Installable> installable);
std::set<StorePath> toDerivations(
ref<Store> store,
const std::vector<std::shared_ptr<Installable>> & installables,
bool useDeriver = false);
BuiltPaths toBuiltPaths(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
OperateOn operateOn,
const std::vector<std::shared_ptr<Installable>> & installables);
/* Helper function to generate args that invoke $EDITOR on /* Helper function to generate args that invoke $EDITOR on
filename:lineno. */ filename:lineno. */
@ -256,7 +274,7 @@ struct MixProfile : virtual StoreCommand
/* If 'profile' is set, make it point at the store path produced /* If 'profile' is set, make it point at the store path produced
by 'buildables'. */ by 'buildables'. */
void updateProfile(const DerivedPathsWithHints & buildables); void updateProfile(const BuiltPaths & buildables);
}; };
struct MixDefaultProfile : MixProfile struct MixDefaultProfile : MixProfile


@ -58,9 +58,13 @@ MixFlakeOptions::MixFlakeOptions()
addFlag({ addFlag({
.longName = "no-registries", .longName = "no-registries",
.description = "Don't allow lookups in the flake registries.", .description =
"Don't allow lookups in the flake registries. This option is deprecated; use `--no-use-registries`.",
.category = category, .category = category,
.handler = {&lockFlags.useRegistries, false} .handler = {[&]() {
lockFlags.useRegistries = false;
warn("'--no-registries' is deprecated; use '--no-use-registries'");
}}
}); });
addFlag({ addFlag({
@ -171,14 +175,50 @@ Strings SourceExprCommand::getDefaultFlakeAttrPathPrefixes()
void SourceExprCommand::completeInstallable(std::string_view prefix) void SourceExprCommand::completeInstallable(std::string_view prefix)
{ {
if (file) return; // FIXME if (file) {
evalSettings.pureEval = false;
auto state = getEvalState();
Expr *e = state->parseExprFromFile(
resolveExprPath(state->checkSourcePath(lookupFileArg(*state, *file)))
);
completeFlakeRefWithFragment( Value root;
getEvalState(), state->eval(e, root);
lockFlags,
getDefaultFlakeAttrPathPrefixes(), auto autoArgs = getAutoArgs(*state);
getDefaultFlakeAttrPaths(),
prefix); std::string prefix_ = std::string(prefix);
auto sep = prefix_.rfind('.');
std::string searchWord;
if (sep != std::string::npos) {
searchWord = prefix_.substr(sep, std::string::npos);
prefix_ = prefix_.substr(0, sep);
} else {
searchWord = prefix_;
prefix_ = "";
}
Value &v1(*findAlongAttrPath(*state, prefix_, *autoArgs, root).first);
state->forceValue(v1);
Value v2;
state->autoCallFunction(*autoArgs, v1, v2);
if (v2.type() == nAttrs) {
for (auto & i : *v2.attrs) {
std::string name = i.name;
if (name.find(searchWord) == 0) {
completions->add(i.name);
}
}
}
} else {
completeFlakeRefWithFragment(
getEvalState(),
lockFlags,
getDefaultFlakeAttrPathPrefixes(),
getDefaultFlakeAttrPaths(),
prefix);
}
} }
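
The `--file` completion added above splits whatever the user typed at the last dot: the part before it is an attribute path to evaluate and enumerate, the rest is the partial attribute name to match against. A simplified stand-alone sketch of that split and prefix match; it drops the leading dot from the search word and uses a hard-coded candidate list, so it illustrates the idea rather than copying the hunk:

#include <iostream>
#include <string>
#include <vector>

int main()
{
    std::string prefix = "pkgs.hello.ove";      // what the user has typed so far

    std::string attrPath, searchWord;
    auto sep = prefix.rfind('.');
    if (sep != std::string::npos) {
        attrPath   = prefix.substr(0, sep);     // "pkgs.hello": the set to evaluate and enumerate
        searchWord = prefix.substr(sep + 1);    // "ove": the partial attribute name
    } else {
        searchWord = prefix;                    // no dot yet: complete top-level names
    }

    // In the real code the candidates come from forcing the attribute set;
    // here they are hard-coded to keep the sketch self-contained.
    std::vector<std::string> attrNames = {"overrideAttrs", "override", "out", "meta"};
    for (auto & name : attrNames)
        if (name.rfind(searchWord, 0) == 0)     // starts-with check
            std::cout << (attrPath.empty() ? name : attrPath + "." + name) << "\n";
}
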
void completeFlakeRefWithFragment( void completeFlakeRefWithFragment(
@ -249,7 +289,6 @@ void completeFlakeRefWithFragment(
completeFlakeRef(evalState->store, prefix); completeFlakeRef(evalState->store, prefix);
} }
void completeFlakeRef(ref<Store> store, std::string_view prefix) void completeFlakeRef(ref<Store> store, std::string_view prefix)
{ {
if (prefix == "") if (prefix == "")
@ -273,9 +312,9 @@ void completeFlakeRef(ref<Store> store, std::string_view prefix)
} }
} }
DerivedPathWithHints Installable::toDerivedPathWithHints() DerivedPath Installable::toDerivedPath()
{ {
auto buildables = toDerivedPathsWithHints(); auto buildables = toDerivedPaths();
if (buildables.size() != 1) if (buildables.size() != 1)
throw Error("installable '%s' evaluates to %d derivations, where only one is expected", what(), buildables.size()); throw Error("installable '%s' evaluates to %d derivations, where only one is expected", what(), buildables.size());
return std::move(buildables[0]); return std::move(buildables[0]);
@ -309,22 +348,19 @@ struct InstallableStorePath : Installable
std::string what() override { return store->printStorePath(storePath); } std::string what() override { return store->printStorePath(storePath); }
DerivedPathsWithHints toDerivedPathsWithHints() override DerivedPaths toDerivedPaths() override
{ {
if (storePath.isDerivation()) { if (storePath.isDerivation()) {
std::map<std::string, std::optional<StorePath>> outputs;
auto drv = store->readDerivation(storePath); auto drv = store->readDerivation(storePath);
for (auto & [name, output] : drv.outputsAndOptPaths(*store))
outputs.emplace(name, output.second);
return { return {
DerivedPathWithHints::Built { DerivedPath::Built {
.drvPath = storePath, .drvPath = storePath,
.outputs = std::move(outputs) .outputs = drv.outputNames(),
} }
}; };
} else { } else {
return { return {
DerivedPathWithHints::Opaque { DerivedPath::Opaque {
.path = storePath, .path = storePath,
} }
}; };
@ -337,22 +373,24 @@ struct InstallableStorePath : Installable
} }
}; };
DerivedPathsWithHints InstallableValue::toDerivedPathsWithHints() DerivedPaths InstallableValue::toDerivedPaths()
{ {
DerivedPathsWithHints res; DerivedPaths res;
std::map<StorePath, std::map<std::string, std::optional<StorePath>>> drvsToOutputs; std::map<StorePath, std::set<std::string>> drvsToOutputs;
RealisedPath::Set drvsToCopy;
// Group by derivation, helps with .all in particular // Group by derivation, helps with .all in particular
for (auto & drv : toDerivations()) { for (auto & drv : toDerivations()) {
auto outputName = drv.outputName; auto outputName = drv.outputName;
if (outputName == "") if (outputName == "")
throw Error("derivation '%s' lacks an 'outputName' attribute", state->store->printStorePath(drv.drvPath)); throw Error("derivation '%s' lacks an 'outputName' attribute", state->store->printStorePath(drv.drvPath));
drvsToOutputs[drv.drvPath].insert_or_assign(outputName, drv.outPath); drvsToOutputs[drv.drvPath].insert(outputName);
drvsToCopy.insert(drv.drvPath);
} }
for (auto & i : drvsToOutputs) for (auto & i : drvsToOutputs)
res.push_back(DerivedPathWithHints::Built { i.first, i.second }); res.push_back(DerivedPath::Built { i.first, i.second });
return res; return res;
} }
@ -564,10 +602,10 @@ InstallableFlake::getCursors(EvalState & state)
std::shared_ptr<flake::LockedFlake> InstallableFlake::getLockedFlake() const std::shared_ptr<flake::LockedFlake> InstallableFlake::getLockedFlake() const
{ {
flake::LockFlags lockFlagsApplyConfig = lockFlags;
lockFlagsApplyConfig.applyNixConfig = true;
if (!_lockedFlake) { if (!_lockedFlake) {
_lockedFlake = std::make_shared<flake::LockedFlake>(lockFlake(*state, flakeRef, lockFlags)); _lockedFlake = std::make_shared<flake::LockedFlake>(lockFlake(*state, flakeRef, lockFlagsApplyConfig));
_lockedFlake->flake.config.apply();
// FIXME: send new config to the daemon.
} }
return _lockedFlake; return _lockedFlake;
} }
@ -616,6 +654,17 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
for (auto & s : ss) { for (auto & s : ss) {
std::exception_ptr ex; std::exception_ptr ex;
if (s.find('/') != std::string::npos) {
try {
result.push_back(std::make_shared<InstallableStorePath>(store, store->followLinksToStorePath(s)));
continue;
} catch (BadStorePath &) {
} catch (...) {
if (!ex)
ex = std::current_exception();
}
}
try { try {
auto [flakeRef, fragment] = parseFlakeRefWithFragment(s, absPath(".")); auto [flakeRef, fragment] = parseFlakeRefWithFragment(s, absPath("."));
result.push_back(std::make_shared<InstallableFlake>( result.push_back(std::make_shared<InstallableFlake>(
@ -630,25 +679,7 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
ex = std::current_exception(); ex = std::current_exception();
} }
if (s.find('/') != std::string::npos) {
try {
result.push_back(std::make_shared<InstallableStorePath>(store, store->followLinksToStorePath(s)));
continue;
} catch (BadStorePath &) {
} catch (...) {
if (!ex)
ex = std::current_exception();
}
}
std::rethrow_exception(ex); std::rethrow_exception(ex);
/*
throw Error(
pathExists(s)
? "path '%s' is not a flake or a store path"
: "don't know how to handle argument '%s'", s);
*/
} }
} }
@ -663,107 +694,121 @@ std::shared_ptr<Installable> SourceExprCommand::parseInstallable(
return installables.front(); return installables.front();
} }
DerivedPathsWithHints build(ref<Store> store, Realise mode, BuiltPaths getBuiltPaths(ref<Store> evalStore, ref<Store> store, const DerivedPaths & hopefullyBuiltPaths)
std::vector<std::shared_ptr<Installable>> installables, BuildMode bMode)
{ {
if (mode == Realise::Nothing) BuiltPaths res;
settings.readOnlyMode = true; for (const auto & b : hopefullyBuiltPaths)
std::visit(
DerivedPathsWithHints buildables; overloaded{
[&](const DerivedPath::Opaque & bo) {
std::vector<DerivedPath> pathsToBuild; res.push_back(BuiltPath::Opaque{bo.path});
for (auto & i : installables) {
for (auto & b : i->toDerivedPathsWithHints()) {
std::visit(overloaded {
[&](DerivedPathWithHints::Opaque bo) {
pathsToBuild.push_back(bo);
}, },
[&](DerivedPathWithHints::Built bfd) { [&](const DerivedPath::Built & bfd) {
StringSet outputNames; OutputPathMap outputs;
for (auto & output : bfd.outputs) auto drv = evalStore->readDerivation(bfd.drvPath);
outputNames.insert(output.first); auto outputHashes = staticOutputHashes(*evalStore, drv); // FIXME: expensive
pathsToBuild.push_back( auto drvOutputs = drv.outputsAndOptPaths(*store);
DerivedPath::Built{bfd.drvPath, outputNames});
},
}, b.raw());
buildables.push_back(std::move(b));
}
}
if (mode == Realise::Nothing)
printMissing(store, pathsToBuild, lvlError);
else if (mode == Realise::Outputs)
store->buildPaths(pathsToBuild, bMode);
return buildables;
}
std::set<RealisedPath> toRealisedPaths(
ref<Store> store,
Realise mode,
OperateOn operateOn,
std::vector<std::shared_ptr<Installable>> installables)
{
std::set<RealisedPath> res;
if (operateOn == OperateOn::Output) {
for (auto & b : build(store, mode, installables))
std::visit(overloaded {
[&](DerivedPathWithHints::Opaque bo) {
res.insert(bo.path);
},
[&](DerivedPathWithHints::Built bfd) {
auto drv = store->readDerivation(bfd.drvPath);
auto outputHashes = staticOutputHashes(*store, drv);
for (auto & output : bfd.outputs) { for (auto & output : bfd.outputs) {
if (settings.isExperimentalFeatureEnabled("ca-derivations")) { if (!outputHashes.count(output))
if (!outputHashes.count(output.first)) throw Error(
throw Error( "the derivation '%s' doesn't have an output named '%s'",
"the derivation '%s' doesn't have an output named '%s'", store->printStorePath(bfd.drvPath), output);
store->printStorePath(bfd.drvPath), if (settings.isExperimentalFeatureEnabled(
output.first); Xp::CaDerivations)) {
auto outputId = DrvOutput{outputHashes.at(output.first), output.first}; auto outputId =
auto realisation = store->queryRealisation(outputId); DrvOutput{outputHashes.at(output), output};
auto realisation =
store->queryRealisation(outputId);
if (!realisation) if (!realisation)
throw Error("cannot operate on an output of unbuilt content-addresed derivation '%s'", outputId.to_string()); throw Error(
res.insert(RealisedPath{*realisation}); "cannot operate on an output of unbuilt "
} "content-addressed derivation '%s'",
else { outputId.to_string());
// If ca-derivations isn't enabled, behave as if outputs.insert_or_assign(
// all the paths are opaque to keep the default output, realisation->outPath);
// behavior } else {
assert(output.second); // If ca-derivations isn't enabled, assume that
res.insert(*output.second); // the output path is statically known.
assert(drvOutputs.count(output));
assert(drvOutputs.at(output).second);
outputs.insert_or_assign(
output, *drvOutputs.at(output).second);
} }
} }
res.push_back(BuiltPath::Built{bfd.drvPath, outputs});
}, },
}, b.raw()); },
} else { b.raw());
if (mode == Realise::Nothing)
settings.readOnlyMode = true;
auto drvPaths = toDerivations(store, installables, true);
res.insert(drvPaths.begin(), drvPaths.end());
}
return res; return res;
} }
StorePathSet toStorePaths(ref<Store> store, BuiltPaths build(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
const std::vector<std::shared_ptr<Installable>> & installables,
BuildMode bMode)
{
if (mode == Realise::Nothing)
settings.readOnlyMode = true;
std::vector<DerivedPath> pathsToBuild;
for (auto & i : installables) {
auto b = i->toDerivedPaths();
pathsToBuild.insert(pathsToBuild.end(), b.begin(), b.end());
}
if (mode == Realise::Nothing || mode == Realise::Derivation)
printMissing(store, pathsToBuild, lvlError);
else if (mode == Realise::Outputs)
store->buildPaths(pathsToBuild, bMode, evalStore);
return getBuiltPaths(evalStore, store, pathsToBuild);
}
BuiltPaths toBuiltPaths(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
OperateOn operateOn,
const std::vector<std::shared_ptr<Installable>> & installables)
{
if (operateOn == OperateOn::Output)
return build(evalStore, store, mode, installables);
else {
if (mode == Realise::Nothing)
settings.readOnlyMode = true;
BuiltPaths res;
for (auto & drvPath : toDerivations(store, installables, true))
res.push_back(BuiltPath::Opaque{drvPath});
return res;
}
}
StorePathSet toStorePaths(
ref<Store> evalStore,
ref<Store> store,
Realise mode, OperateOn operateOn, Realise mode, OperateOn operateOn,
std::vector<std::shared_ptr<Installable>> installables) const std::vector<std::shared_ptr<Installable>> & installables)
{ {
StorePathSet outPaths; StorePathSet outPaths;
for (auto & path : toRealisedPaths(store, mode, operateOn, installables)) for (auto & path : toBuiltPaths(evalStore, store, mode, operateOn, installables)) {
outPaths.insert(path.path()); auto thisOutPaths = path.outPaths();
outPaths.insert(thisOutPaths.begin(), thisOutPaths.end());
}
return outPaths; return outPaths;
} }
StorePath toStorePath(ref<Store> store, StorePath toStorePath(
ref<Store> evalStore,
ref<Store> store,
Realise mode, OperateOn operateOn, Realise mode, OperateOn operateOn,
std::shared_ptr<Installable> installable) std::shared_ptr<Installable> installable)
{ {
auto paths = toStorePaths(store, mode, operateOn, {installable}); auto paths = toStorePaths(evalStore, store, mode, operateOn, {installable});
if (paths.size() != 1) if (paths.size() != 1)
throw Error("argument '%s' should evaluate to one store path", installable->what()); throw Error("argument '%s' should evaluate to one store path", installable->what());
@ -771,15 +816,17 @@ StorePath toStorePath(ref<Store> store,
return *paths.begin(); return *paths.begin();
} }
StorePathSet toDerivations(ref<Store> store, StorePathSet toDerivations(
std::vector<std::shared_ptr<Installable>> installables, bool useDeriver) ref<Store> store,
const std::vector<std::shared_ptr<Installable>> & installables,
bool useDeriver)
{ {
StorePathSet drvPaths; StorePathSet drvPaths;
for (auto & i : installables) for (const auto & i : installables)
for (auto & b : i->toDerivedPathsWithHints()) for (const auto & b : i->toDerivedPaths())
std::visit(overloaded { std::visit(overloaded {
[&](DerivedPathWithHints::Opaque bo) { [&](const DerivedPath::Opaque & bo) {
if (!useDeriver) if (!useDeriver)
throw Error("argument '%s' did not evaluate to a derivation", i->what()); throw Error("argument '%s' did not evaluate to a derivation", i->what());
auto derivers = store->queryValidDerivers(bo.path); auto derivers = store->queryValidDerivers(bo.path);
@ -788,7 +835,7 @@ StorePathSet toDerivations(ref<Store> store,
// FIXME: use all derivers? // FIXME: use all derivers?
drvPaths.insert(*derivers.begin()); drvPaths.insert(*derivers.begin());
}, },
[&](DerivedPathWithHints::Built bfd) { [&](const DerivedPath::Built & bfd) {
drvPaths.insert(bfd.drvPath); drvPaths.insert(bfd.drvPath);
}, },
}, b.raw()); }, b.raw());


@ -23,17 +23,23 @@ struct App
// FIXME: add args, sandbox settings, metadata, ... // FIXME: add args, sandbox settings, metadata, ...
}; };
struct UnresolvedApp
{
App unresolved;
App resolve(ref<Store> evalStore, ref<Store> store);
};
struct Installable struct Installable
{ {
virtual ~Installable() { } virtual ~Installable() { }
virtual std::string what() = 0; virtual std::string what() = 0;
virtual DerivedPathsWithHints toDerivedPathsWithHints() = 0; virtual DerivedPaths toDerivedPaths() = 0;
DerivedPathWithHints toDerivedPathWithHints(); DerivedPath toDerivedPath();
App toApp(EvalState & state); UnresolvedApp toApp(EvalState & state);
virtual std::pair<Value *, Pos> toValue(EvalState & state) virtual std::pair<Value *, Pos> toValue(EvalState & state)
{ {
@ -74,7 +80,7 @@ struct InstallableValue : Installable
virtual std::vector<DerivationInfo> toDerivations() = 0; virtual std::vector<DerivationInfo> toDerivations() = 0;
DerivedPathsWithHints toDerivedPathsWithHints() override; DerivedPaths toDerivedPaths() override;
}; };
struct InstallableFlake : InstallableValue struct InstallableFlake : InstallableValue


@ -8,8 +8,9 @@ libcmd_SOURCES := $(wildcard $(d)/*.cc)
libcmd_CXXFLAGS += -I src/libutil -I src/libstore -I src/libexpr -I src/libmain -I src/libfetchers -I src/nix libcmd_CXXFLAGS += -I src/libutil -I src/libstore -I src/libexpr -I src/libmain -I src/libfetchers -I src/nix
libcmd_LDFLAGS = $(EDITLINE_LIBS) -llowdown # libcmd_LDFLAGS = $(EDITLINE_LIBS) -llowdown
libcmd_LDFLAGS += -llowdown -pthread
libcmd_LIBS = libstore libutil libexpr libmain libfetchers libnix libcmd_LIBS = libstore libutil libexpr libmain libfetchers libnix
$(eval $(call install-file-in, $(d)/nix-cmd.pc, $(prefix)/lib/pkgconfig, 0644)) $(eval $(call install-file-in, $(d)/nix-cmd.pc, $(libdir)/pkgconfig, 0644))


@ -12,7 +12,7 @@ std::string renderMarkdownToTerminal(std::string_view markdown)
struct lowdown_opts opts { struct lowdown_opts opts {
.type = LOWDOWN_TERM, .type = LOWDOWN_TERM,
.maxdepth = 20, .maxdepth = 20,
.cols = std::min(getWindowSize().second, (unsigned short) 80), .cols = std::max(getWindowSize().second, (unsigned short) 80),
.hmargin = 0, .hmargin = 0,
.vmargin = 0, .vmargin = 0,
.feat = LOWDOWN_COMMONMARK | LOWDOWN_FENCED | LOWDOWN_DEFLIST | LOWDOWN_TABLES, .feat = LOWDOWN_COMMONMARK | LOWDOWN_FENCED | LOWDOWN_DEFLIST | LOWDOWN_TABLES,
@ -25,7 +25,7 @@ std::string renderMarkdownToTerminal(std::string_view markdown)
Finally freeDoc([&]() { lowdown_doc_free(doc); }); Finally freeDoc([&]() { lowdown_doc_free(doc); });
size_t maxn = 0; size_t maxn = 0;
auto node = lowdown_doc_parse(doc, &maxn, markdown.data(), markdown.size()); auto node = lowdown_doc_parse(doc, &maxn, markdown.data(), markdown.size(), nullptr);
if (!node) if (!node)
throw Error("cannot parse Markdown document"); throw Error("cannot parse Markdown document");
Finally freeNode([&]() { lowdown_node_free(node); }); Finally freeNode([&]() { lowdown_node_free(node); });
@ -40,11 +40,11 @@ std::string renderMarkdownToTerminal(std::string_view markdown)
throw Error("cannot allocate Markdown output buffer"); throw Error("cannot allocate Markdown output buffer");
Finally freeBuffer([&]() { lowdown_buf_free(buf); }); Finally freeBuffer([&]() { lowdown_buf_free(buf); });
int rndr_res = lowdown_term_rndr(buf, nullptr, renderer, node); int rndr_res = lowdown_term_rndr(buf, renderer, node);
if (!rndr_res) if (!rndr_res)
throw Error("allocation error while rendering Markdown"); throw Error("allocation error while rendering Markdown");
return std::string(buf->data, buf->size); return filterANSIEscapes(std::string(buf->data, buf->size), !shouldANSI());
} }
} }


@ -68,6 +68,7 @@ struct NixRepl
StorePath getDerivationPath(Value & v); StorePath getDerivationPath(Value & v);
bool processLine(string line); bool processLine(string line);
void loadFile(const Path & path); void loadFile(const Path & path);
void loadFlake(const std::string & flakeRef);
void initEnv(); void initEnv();
void reloadFiles(); void reloadFiles();
void addAttrsToScope(Value & attrs); void addAttrsToScope(Value & attrs);
@ -104,6 +105,25 @@ NixRepl::~NixRepl()
write_history(historyFile.c_str()); write_history(historyFile.c_str());
} }
string runNix(Path program, const Strings & args,
const std::optional<std::string> & input = {})
{
auto subprocessEnv = getEnv();
subprocessEnv["NIX_CONFIG"] = globalConfig.toKeyValue();
auto res = runProgram(RunOptions {
.program = settings.nixBinDir+ "/" + program,
.args = args,
.environment = subprocessEnv,
.input = input,
});
if (!statusOk(res.first))
throw ExecError(res.first, fmt("program '%1%' %2%", program, statusToString(res.first)));
return res.second;
}
static NixRepl * curRepl; // ugly static NixRepl * curRepl; // ugly
static char * completionCallback(char * s, int *match) { static char * completionCallback(char * s, int *match) {
@ -180,15 +200,14 @@ namespace {
void NixRepl::mainLoop(const std::vector<std::string> & files) void NixRepl::mainLoop(const std::vector<std::string> & files)
{ {
string error = ANSI_RED "error:" ANSI_NORMAL " "; string error = ANSI_RED "error:" ANSI_NORMAL " ";
std::cout << "Welcome to Nix version " << nixVersion << ". Type :? for help." << std::endl << std::endl; notice("Welcome to Nix " + nixVersion + ". Type :? for help.\n");
if (!files.empty()) { if (!files.empty()) {
for (auto & i : files) for (auto & i : files)
loadedFiles.push_back(i); loadedFiles.push_back(i);
reloadFiles(); reloadFiles();
if (!loadedFiles.empty()) std::cout << std::endl; if (!loadedFiles.empty()) notice("");
}
// Allow nix-repl specific settings in .inputrc // Allow nix-repl specific settings in .inputrc
rl_readline_name = "nix-repl"; rl_readline_name = "nix-repl";
@ -378,6 +397,8 @@ bool NixRepl::processLine(string line)
{ {
if (line == "") return true; if (line == "") return true;
_isInterrupted = false;
string command, arg; string command, arg;
if (line[0] == ':') { if (line[0] == ':') {
@ -397,9 +418,10 @@ bool NixRepl::processLine(string line)
<< " <x> = <expr> Bind expression to variable\n" << " <x> = <expr> Bind expression to variable\n"
<< " :a <expr> Add attributes from resulting set to scope\n" << " :a <expr> Add attributes from resulting set to scope\n"
<< " :b <expr> Build derivation\n" << " :b <expr> Build derivation\n"
<< " :e <expr> Open the derivation in $EDITOR\n" << " :e <expr> Open package or function in $EDITOR\n"
<< " :i <expr> Build derivation, then install result into current profile\n" << " :i <expr> Build derivation, then install result into current profile\n"
<< " :l <path> Load Nix expression and add it to scope\n" << " :l <path> Load Nix expression and add it to scope\n"
<< " :lf <ref> Load Nix flake and add it to scope\n"
<< " :p <expr> Evaluate and print expression recursively\n" << " :p <expr> Evaluate and print expression recursively\n"
<< " :q Exit nix-repl\n" << " :q Exit nix-repl\n"
<< " :r Reload all files\n" << " :r Reload all files\n"
@ -420,6 +442,10 @@ bool NixRepl::processLine(string line)
loadFile(arg); loadFile(arg);
} }
else if (command == ":lf" || command == ":load-flake") {
loadFlake(arg);
}
else if (command == ":r" || command == ":reload") { else if (command == ":r" || command == ":reload") {
state->resetFileCache(); state->resetFileCache();
reloadFiles(); reloadFiles();
@ -439,14 +465,17 @@ bool NixRepl::processLine(string line)
pos = v.lambda.fun->pos; pos = v.lambda.fun->pos;
} else { } else {
// assume it's a derivation // assume it's a derivation
pos = findDerivationFilename(*state, v, arg); pos = findPackageFilename(*state, v, arg);
} }
// Open in EDITOR // Open in EDITOR
auto args = editorFor(pos); auto args = editorFor(pos);
auto editor = args.front(); auto editor = args.front();
args.pop_front(); args.pop_front();
runProgram(editor, true, args);
// runProgram redirects stdout to a StringSink,
// using runProgram2 to allow editors to display their UI
runProgram2(RunOptions { .program = editor, .searchPath = true, .args = args });
// Reload right after exiting the editor // Reload right after exiting the editor
state->resetFileCache(); state->resetFileCache();
@ -456,16 +485,17 @@ bool NixRepl::processLine(string line)
else if (command == ":t") { else if (command == ":t") {
Value v; Value v;
evalString(arg, v); evalString(arg, v);
std::cout << showType(v) << std::endl; logger->cout(showType(v));
}
} else if (command == ":u") { else if (command == ":u") {
Value v, f, result; Value v, f, result;
evalString(arg, v); evalString(arg, v);
evalString("drv: (import <nixpkgs> {}).runCommand \"shell\" { buildInputs = [ drv ]; } \"\"", f); evalString("drv: (import <nixpkgs> {}).runCommand \"shell\" { buildInputs = [ drv ]; } \"\"", f);
state->callFunction(f, v, result, Pos()); state->callFunction(f, v, result, Pos());
StorePath drvPath = getDerivationPath(result); StorePath drvPath = getDerivationPath(result);
runProgram(settings.nixBinDir + "/nix-shell", true, {state->store->printStorePath(drvPath)}); runNix("nix-shell", {state->store->printStorePath(drvPath)});
} }
else if (command == ":b" || command == ":i" || command == ":s") { else if (command == ":b" || command == ":i" || command == ":s") {
@ -475,21 +505,15 @@ bool NixRepl::processLine(string line)
Path drvPathRaw = state->store->printStorePath(drvPath); Path drvPathRaw = state->store->printStorePath(drvPath);
if (command == ":b") { if (command == ":b") {
/* We could do the build in this process using buildPaths(), state->store->buildPaths({DerivedPath::Built{drvPath}});
but doing it in a child makes it easier to recover from auto drv = state->store->readDerivation(drvPath);
problems / SIGINT. */ logger->cout("\nThis derivation produced the following outputs:");
try { for (auto & [outputName, outputPath] : state->store->queryDerivationOutputMap(drvPath))
runProgram(settings.nixBinDir + "/nix", true, {"build", "--no-link", drvPathRaw}); logger->cout(" %s -> %s", outputName, state->store->printStorePath(outputPath));
auto drv = state->store->readDerivation(drvPath);
std::cout << std::endl << "this derivation produced the following outputs:" << std::endl;
for (auto & i : drv.outputsAndOptPaths(*state->store))
std::cout << fmt(" %s -> %s\n", i.first, state->store->printStorePath(*i.second.second));
} catch (ExecError &) {
}
} else if (command == ":i") { } else if (command == ":i") {
runProgram(settings.nixBinDir + "/nix-env", true, {"-i", drvPathRaw}); runNix("nix-env", {"-i", drvPathRaw});
} else { } else {
runProgram(settings.nixBinDir + "/nix-shell", true, {drvPathRaw}); runNix("nix-shell", {drvPathRaw});
} }
} }
@ -518,9 +542,9 @@ bool NixRepl::processLine(string line)
+ concatStringsSep(" ", args) + "\n\n"; + concatStringsSep(" ", args) + "\n\n";
} }
markdown += trim(stripIndentation(doc->doc)); markdown += stripIndentation(doc->doc);
std::cout << renderMarkdownToTerminal(markdown); logger->cout(trim(renderMarkdownToTerminal(markdown)));
} else } else
throw Error("value does not have documentation"); throw Error("value does not have documentation");
} }
@ -561,6 +585,25 @@ void NixRepl::loadFile(const Path & path)
addAttrsToScope(v2); addAttrsToScope(v2);
} }
void NixRepl::loadFlake(const std::string & flakeRefS)
{
auto flakeRef = parseFlakeRef(flakeRefS, absPath("."), true);
if (evalSettings.pureEval && !flakeRef.input.isImmutable())
throw Error("cannot use ':load-flake' on mutable flake reference '%s' (use --impure to override)", flakeRefS);
Value v;
flake::callFlake(*state,
flake::lockFlake(*state, flakeRef,
flake::LockFlags {
.updateLockFile = false,
.useRegistries = !evalSettings.pureEval,
.allowMutable = !evalSettings.pureEval,
}),
v);
addAttrsToScope(v);
}
void NixRepl::initEnv() void NixRepl::initEnv()
{ {
@ -584,9 +627,9 @@ void NixRepl::reloadFiles()
bool first = true; bool first = true;
for (auto & i : old) { for (auto & i : old) {
if (!first) std::cout << std::endl; if (!first) notice("");
first = false; first = false;
std::cout << format("Loading '%1%'...") % i << std::endl; notice("Loading '%1%'...", i);
loadFile(i); loadFile(i);
} }
} }
@ -596,8 +639,8 @@ void NixRepl::addAttrsToScope(Value & attrs)
{ {
state->forceAttrs(attrs); state->forceAttrs(attrs);
for (auto & i : *attrs.attrs) for (auto & i : *attrs.attrs)
addVarToScope(i.name, i.value); addVarToScope(i.name, *i.value);
std::cout << format("Added %1% variables.") % attrs.attrs->size() << std::endl; notice("Added %1% variables.", attrs.attrs->size());
} }
@ -605,8 +648,9 @@ void NixRepl::addVarToScope(const Symbol & name, Value * v)
{ {
if (displ >= envSize) if (displ >= envSize)
throw Error("environment full; cannot add more variables"); throw Error("environment full; cannot add more variables");
staticEnv->vars[name] = displ; staticEnv->vars.emplace_back(name, displ);
env->values[displ++] = v; staticEnv->sort();
env->values[displ++] = &v;
varNames.insert((string) name); varNames.insert((string) name);
} }
@ -665,7 +709,7 @@ std::ostream & NixRepl::printValue(std::ostream & str, Value & v, unsigned int m
break; break;
case nString: case nString:
str << ANSI_YELLOW; str << ANSI_WARNING;
printStringValue(str, v.string.s); printStringValue(str, v.string.s);
str << ANSI_NORMAL; str << ANSI_NORMAL;
break; break;
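
In the `addVarToScope` hunk above, variables are now added with `vars.emplace_back(...)` followed by `staticEnv->sort()`, which suggests `StaticEnv` keeps its (symbol, displacement) pairs in a sorted vector and finds them by binary search instead of going through a map. A generic sketch of that append-then-sort lookup pattern; `SortedVars` and its members are illustrative, not Nix's actual `StaticEnv`:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct SortedVars
{
    std::vector<std::pair<std::string, std::size_t>> vars;   // (name, displacement)

    void add(std::string name, std::size_t displ) { vars.emplace_back(std::move(name), displ); }

    // Call once after a batch of add()s; find() assumes sorted order.
    void sort() { std::sort(vars.begin(), vars.end()); }

    const std::size_t * find(const std::string & name) const
    {
        auto i = std::lower_bound(vars.begin(), vars.end(), name,
            [](const auto & p, const std::string & n) { return p.first < n; });
        return i != vars.end() && i->first == name ? &i->second : nullptr;
    }
};

int main()
{
    SortedVars env;
    env.add("x", 0);
    env.add("builtins", 1);
    env.sort();
    if (auto d = env.find("builtins"))
        std::cout << "builtins -> slot " << *d << "\n";
}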


@ -19,7 +19,7 @@ static Strings parseAttrPath(std::string_view s)
++i; ++i;
while (1) { while (1) {
if (i == s.end()) if (i == s.end())
throw Error("missing closing quote in selection path '%1%'", s); throw ParseError("missing closing quote in selection path '%1%'", s);
if (*i == '"') break; if (*i == '"') break;
cur.push_back(*i++); cur.push_back(*i++);
} }
@ -100,7 +100,7 @@ std::pair<Value *, Pos> findAlongAttrPath(EvalState & state, const string & attr
} }
Pos findDerivationFilename(EvalState & state, Value & v, std::string what) Pos findPackageFilename(EvalState & state, Value & v, std::string what)
{ {
Value * v2; Value * v2;
try { try {
@ -116,14 +116,14 @@ Pos findDerivationFilename(EvalState & state, Value & v, std::string what)
auto colon = pos.rfind(':'); auto colon = pos.rfind(':');
if (colon == std::string::npos) if (colon == std::string::npos)
throw Error("cannot parse meta.position attribute '%s'", pos); throw ParseError("cannot parse meta.position attribute '%s'", pos);
std::string filename(pos, 0, colon); std::string filename(pos, 0, colon);
unsigned int lineno; unsigned int lineno;
try { try {
lineno = std::stoi(std::string(pos, colon + 1)); lineno = std::stoi(std::string(pos, colon + 1));
} catch (std::invalid_argument & e) { } catch (std::invalid_argument & e) {
throw Error("cannot parse line number '%s'", pos); throw ParseError("cannot parse line number '%s'", pos);
} }
Symbol file = state.symbols.create(filename); Symbol file = state.symbols.create(filename);
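
`findPackageFilename` above recovers a package's source location by splitting its `meta.position` string at the last colon into a file name and a line number, turning parse failures into `ParseError`. The same parsing in isolation (the position string is a made-up example and a generic exception stands in for Nix's error types):

#include <iostream>
#include <stdexcept>
#include <string>

int main()
{
    std::string pos = "/nix/store/xyz-nixpkgs/pkgs/applications/misc/hello/default.nix:17";

    auto colon = pos.rfind(':');
    if (colon == std::string::npos)
        throw std::runtime_error("cannot parse position '" + pos + "'");

    std::string filename = pos.substr(0, colon);
    unsigned int lineno = std::stoi(pos.substr(colon + 1));   // std::invalid_argument on junk

    std::cout << filename << ", line " << lineno << "\n";
}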


@ -14,7 +14,7 @@ std::pair<Value *, Pos> findAlongAttrPath(EvalState & state, const string & attr
Bindings & autoArgs, Value & vIn); Bindings & autoArgs, Value & vIn);
/* Heuristic to find the filename and lineno or a nix value. */ /* Heuristic to find the filename and lineno or a nix value. */
Pos findDerivationFilename(EvalState & state, Value & v, std::string what); Pos findPackageFilename(EvalState & state, Value & v, std::string what);
std::vector<Symbol> parseAttrPath(EvalState & state, std::string_view s); std::vector<Symbol> parseAttrPath(EvalState & state, std::string_view s);


@ -17,8 +17,8 @@ struct Attr
{ {
Symbol name; Symbol name;
Value * value; Value * value;
Pos * pos; ptr<Pos> pos;
Attr(Symbol name, Value * value, Pos * pos = &noPos) Attr(Symbol name, Value * value, ptr<Pos> pos = ptr(&noPos))
: name(name), value(value), pos(pos) { }; : name(name), value(value), pos(pos) { };
Attr() : pos(&noPos) { }; Attr() : pos(&noPos) { };
bool operator < (const Attr & a) const bool operator < (const Attr & a) const
@ -35,13 +35,13 @@ class Bindings
{ {
public: public:
typedef uint32_t size_t; typedef uint32_t size_t;
Pos *pos; ptr<Pos> pos;
private: private:
size_t size_, capacity_; size_t size_, capacity_;
Attr attrs[0]; Attr attrs[0];
Bindings(size_t capacity) : size_(0), capacity_(capacity) { } Bindings(size_t capacity) : pos(&noPos), size_(0), capacity_(capacity) { }
Bindings(const Bindings & bindings) = delete; Bindings(const Bindings & bindings) = delete;
public: public:
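
The hunk above replaces the raw `Pos *` members of `Attr` and `Bindings` with `ptr<Pos>`, defaulting to `&noPos`, presumably a pointer wrapper that can never be null so later code may dereference it without checks. A minimal wrapper in that spirit; the `not_null` name and the toy `Pos`/`noPos` are illustrative, not Nix's actual `ptr<T>`:

#include <cassert>
#include <iostream>

// A never-null pointer wrapper: it must be constructed from a real address
// (asserted here), so dereferencing needs no null check afterwards.
template<typename T>
struct not_null
{
    T * p;
    explicit not_null(T * p) : p(p) { assert(p); }
    T & operator * () const { return *p; }
    T * operator -> () const { return p; }
};

struct Pos { int line = 0, column = 0; };
static Pos noPos;

int main()
{
    not_null<Pos> pos(&noPos);          // default to &noPos instead of a nullptr
    std::cout << pos->line << "\n";     // safe without checking for null
}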


@ -61,6 +61,14 @@ MixEvalArgs::MixEvalArgs()
fetchers::overrideRegistry(from.input, to.input, extraAttrs); fetchers::overrideRegistry(from.input, to.input, extraAttrs);
}} }}
}); });
addFlag({
.longName = "eval-store",
.description = "The Nix store to use for evaluations.",
.category = category,
.labels = {"store-url"},
.handler = {&evalStoreUrl},
});
} }
Bindings * MixEvalArgs::getAutoArgs(EvalState & state) Bindings * MixEvalArgs::getAutoArgs(EvalState & state)


@ -16,8 +16,9 @@ struct MixEvalArgs : virtual Args
Strings searchPath; Strings searchPath;
private: std::optional<std::string> evalStoreUrl;
private:
std::map<std::string, std::string> autoArgs; std::map<std::string, std::string> autoArgs;
}; };


@ -65,7 +65,11 @@ static char * dupStringWithLen(const char * s, size_t size)
RootValue allocRootValue(Value * v) RootValue allocRootValue(Value * v)
{ {
#if HAVE_BOEHMGC
return std::allocate_shared<Value *>(traceable_allocator<Value *>(), v); return std::allocate_shared<Value *>(traceable_allocator<Value *>(), v);
#else
return std::make_shared<Value *>(v);
#endif
} }
@ -234,22 +238,34 @@ static void * oomHandler(size_t requested)
} }
class BoehmGCStackAllocator : public StackAllocator { class BoehmGCStackAllocator : public StackAllocator {
boost::coroutines2::protected_fixedsize_stack stack { boost::coroutines2::protected_fixedsize_stack stack {
// We allocate 8 MB, the default max stack size on NixOS. // We allocate 8 MB, the default max stack size on NixOS.
// A smaller stack might be quicker to allocate but reduces the stack // A smaller stack might be quicker to allocate but reduces the stack
// depth available for source filter expressions etc. // depth available for source filter expressions etc.
std::max(boost::context::stack_traits::default_size(), static_cast<std::size_t>(8 * 1024 * 1024)) std::max(boost::context::stack_traits::default_size(), static_cast<std::size_t>(8 * 1024 * 1024))
}; };
// This is specific to boost::coroutines2::protected_fixedsize_stack.
// The stack protection page is included in sctx.size, so we have to
// subtract one page size from the stack size.
std::size_t pfss_usable_stack_size(boost::context::stack_context &sctx) {
return sctx.size - boost::context::stack_traits::page_size();
}
public: public:
boost::context::stack_context allocate() override { boost::context::stack_context allocate() override {
auto sctx = stack.allocate(); auto sctx = stack.allocate();
GC_add_roots(static_cast<char *>(sctx.sp) - sctx.size, sctx.sp);
// Stacks generally start at a high address and grow to lower addresses.
// Architectures that do the opposite are rare; in fact so rare that
    // Boost.Coroutine2 does not implement it.
// So we subtract the stack size.
GC_add_roots(static_cast<char *>(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp);
return sctx; return sctx;
} }
void deallocate(boost::context::stack_context sctx) override { void deallocate(boost::context::stack_context sctx) override {
GC_remove_roots(static_cast<char *>(sctx.sp) - sctx.size, sctx.sp); GC_remove_roots(static_cast<char *>(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp);
stack.deallocate(sctx); stack.deallocate(sctx);
} }
@ -363,7 +379,10 @@ static Strings parseNixPath(const string & s)
} }
EvalState::EvalState(const Strings & _searchPath, ref<Store> store) EvalState::EvalState(
const Strings & _searchPath,
ref<Store> store,
std::shared_ptr<Store> buildStore)
: sWith(symbols.create("<with>")) : sWith(symbols.create("<with>"))
, sOutPath(symbols.create("outPath")) , sOutPath(symbols.create("outPath"))
, sDrvPath(symbols.create("drvPath")) , sDrvPath(symbols.create("drvPath"))
@ -396,6 +415,7 @@ EvalState::EvalState(const Strings & _searchPath, ref<Store> store)
, sEpsilon(symbols.create("")) , sEpsilon(symbols.create(""))
, repair(NoRepair) , repair(NoRepair)
, store(store) , store(store)
, buildStore(buildStore ? buildStore : store)
, regexCache(makeRegexCache()) , regexCache(makeRegexCache())
, baseEnv(allocEnv(128)) , baseEnv(allocEnv(128))
, staticBaseEnv(new StaticEnv(false, 0)) , staticBaseEnv(new StaticEnv(false, 0))
@ -426,12 +446,12 @@ EvalState::EvalState(const Strings & _searchPath, ref<Store> store)
StorePathSet closure; StorePathSet closure;
store->computeFSClosure(store->toStorePath(r.second).first, closure); store->computeFSClosure(store->toStorePath(r.second).first, closure);
for (auto & path : closure) for (auto & path : closure)
allowedPaths->insert(store->printStorePath(path)); allowPath(path);
} catch (InvalidPath &) { } catch (InvalidPath &) {
allowedPaths->insert(r.second); allowPath(r.second);
} }
} else } else
allowedPaths->insert(r.second); allowPath(r.second);
} }
} }
@ -446,6 +466,35 @@ EvalState::~EvalState()
} }
void EvalState::requireExperimentalFeatureOnEvaluation(
const ExperimentalFeature & feature,
const std::string_view fName,
const Pos & pos)
{
if (!settings.isExperimentalFeatureEnabled(feature)) {
throw EvalError({
.msg = hintfmt(
"Cannot call '%2%' because experimental Nix feature '%1%' is disabled. You can enable it via '--extra-experimental-features %1%'.",
feature,
fName
),
.errPos = pos
});
}
}
void EvalState::allowPath(const Path & path)
{
if (allowedPaths)
allowedPaths->insert(path);
}
void EvalState::allowPath(const StorePath & storePath)
{
if (allowedPaths)
allowedPaths->insert(store->toRealPath(storePath));
}
Path EvalState::checkSourcePath(const Path & path_) Path EvalState::checkSourcePath(const Path & path_)
{ {
if (!allowedPaths) return path_; if (!allowedPaths) return path_;
@ -472,7 +521,7 @@ Path EvalState::checkSourcePath(const Path & path_)
} }
if (!found) if (!found)
throw RestrictedPathError("access to path '%1%' is forbidden in restricted mode", abspath); throw RestrictedPathError("access to absolute path '%1%' is forbidden in restricted mode", abspath);
/* Resolve symlinks. */ /* Resolve symlinks. */
debug(format("checking access to '%s'") % abspath); debug(format("checking access to '%s'") % abspath);
@ -485,7 +534,7 @@ Path EvalState::checkSourcePath(const Path & path_)
} }
} }
throw RestrictedPathError("access to path '%1%' is forbidden in restricted mode", path); throw RestrictedPathError("access to canonical path '%1%' is forbidden in restricted mode", path);
} }
@ -535,14 +584,20 @@ Value * EvalState::addConstant(const string & name, Value & v)
{ {
Value * v2 = allocValue(); Value * v2 = allocValue();
*v2 = v; *v2 = v;
staticBaseEnv->vars[symbols.create(name)] = baseEnvDispl; addConstant(name, v2);
baseEnv.values[baseEnvDispl++] = v2;
string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name;
baseEnv.values[0]->attrs->push_back(Attr(symbols.create(name2), v2));
return v2; return v2;
} }
void EvalState::addConstant(const string & name, Value * v)
{
staticBaseEnv.vars.emplace_back(symbols.create(name), baseEnvDispl);
baseEnv.values[baseEnvDispl++] = v;
string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name;
baseEnv.values[0]->attrs->push_back(Attr(symbols.create(name2), v));
}
Value * EvalState::addPrimOp(const string & name, Value * EvalState::addPrimOp(const string & name,
size_t arity, PrimOpFun primOp) size_t arity, PrimOpFun primOp)
{ {
@ -561,7 +616,7 @@ Value * EvalState::addPrimOp(const string & name,
Value * v = allocValue(); Value * v = allocValue();
v->mkPrimOp(new PrimOp { .fun = primOp, .arity = arity, .name = sym }); v->mkPrimOp(new PrimOp { .fun = primOp, .arity = arity, .name = sym });
staticBaseEnv->vars[symbols.create(name)] = baseEnvDispl; staticBaseEnv->vars.emplace_back(symbols.create(name), baseEnvDispl);
baseEnv.values[baseEnvDispl++] = v; baseEnv.values[baseEnvDispl++] = v;
baseEnv.values[0]->attrs->push_back(Attr(sym, v)); baseEnv.values[0]->attrs->push_back(Attr(sym, v));
return v; return v;
@ -587,7 +642,7 @@ Value * EvalState::addPrimOp(PrimOp && primOp)
Value * v = allocValue(); Value * v = allocValue();
v->mkPrimOp(new PrimOp(std::move(primOp))); v->mkPrimOp(new PrimOp(std::move(primOp)));
staticBaseEnv->vars[envName] = baseEnvDispl; staticBaseEnv->vars.emplace_back(envName, baseEnvDispl);
baseEnv.values[baseEnvDispl++] = v; baseEnv.values[baseEnvDispl++] = v;
baseEnv.values[0]->attrs->push_back(Attr(primOp.name, v)); baseEnv.values[0]->attrs->push_back(Attr(primOp.name, v));
return v; return v;
@ -861,7 +916,7 @@ void mkPath(Value & v, const char * s)
inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval) inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval)
{ {
for (size_t l = var.level; l; --l, env = env->up) ; for (auto l = var.level; l; --l, env = env->up) ;
if (!var.fromWith) return env->values[var.displ]; if (!var.fromWith) return env->values[var.displ];
@ -875,7 +930,7 @@ inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval)
} }
Bindings::iterator j = env->values[0]->attrs->find(var.name); Bindings::iterator j = env->values[0]->attrs->find(var.name);
if (j != env->values[0]->attrs->end()) { if (j != env->values[0]->attrs->end()) {
if (countCalls && j->pos) attrSelects[*j->pos]++; if (countCalls) attrSelects[*j->pos]++;
return j->value; return j->value;
} }
if (!env->prevWith) { if (!env->prevWith) {
@ -886,18 +941,10 @@ inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval)
} }
std::atomic<uint64_t> nrValuesFreed{0};
void finalizeValue(void * obj, void * data)
{
nrValuesFreed++;
}
Value * EvalState::allocValue() Value * EvalState::allocValue()
{ {
nrValues++; nrValues++;
auto v = (Value *) allocBytes(sizeof(Value)); auto v = (Value *) allocBytes(sizeof(Value));
//GC_register_finalizer_no_order(v, finalizeValue, nullptr, nullptr, nullptr);
return v; return v;
} }
@ -949,9 +996,9 @@ void EvalState::mkThunk_(Value & v, Expr * expr)
} }
void EvalState::mkPos(Value & v, Pos * pos) void EvalState::mkPos(Value & v, ptr<Pos> pos)
{ {
if (pos && pos->file.set()) { if (pos->file.set()) {
mkAttrs(v, 3); mkAttrs(v, 3);
mkString(*allocAttr(v, sFile), pos->file); mkString(*allocAttr(v, sFile), pos->file);
mkInt(*allocAttr(v, sLine), pos->line); mkInt(*allocAttr(v, sLine), pos->line);
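`mkPos` now takes `ptr<Pos>` rather than a raw `Pos *`, so the null check on `pos` disappears and only `pos->file.set()` remains. The sketch below is a rough standalone illustration of such a non-null wrapper, consistent with the `ptr(&pos)` uses elsewhere in this diff; it is not necessarily Nix's exact class:

```cpp
// A minimal non-null pointer wrapper: null is rejected once at construction,
// so callers can drop their `if (pos)` guards.
#include <cassert>

template<typename T>
struct ptr
{
    T * p;
    explicit ptr(T * p) : p(p) { assert(p); }
    T & operator *() const { return *p; }
    T * operator ->() const { return p; }
};

struct Pos { int line = 0; };

int main()
{
    Pos pos{42};
    ptr<Pos> pp(&pos);          // ptr(&pos) in the diff relies on the same idea
    return pp->line == 42 ? 0 : 1;
}
```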
@ -974,39 +1021,37 @@ Value * Expr::maybeThunk(EvalState & state, Env & env)
} }
unsigned long nrAvoided = 0;
Value * ExprVar::maybeThunk(EvalState & state, Env & env) Value * ExprVar::maybeThunk(EvalState & state, Env & env)
{ {
Value * v = state.lookupVar(&env, *this, true); Value * v = state.lookupVar(&env, *this, true);
/* The value might not be initialised in the environment yet. /* The value might not be initialised in the environment yet.
In that case, ignore it. */ In that case, ignore it. */
if (v) { nrAvoided++; return v; } if (v) { state.nrAvoided++; return v; }
return Expr::maybeThunk(state, env); return Expr::maybeThunk(state, env);
} }
Value * ExprString::maybeThunk(EvalState & state, Env & env) Value * ExprString::maybeThunk(EvalState & state, Env & env)
{ {
nrAvoided++; state.nrAvoided++;
return &v; return &v;
} }
Value * ExprInt::maybeThunk(EvalState & state, Env & env) Value * ExprInt::maybeThunk(EvalState & state, Env & env)
{ {
nrAvoided++; state.nrAvoided++;
return &v; return &v;
} }
Value * ExprFloat::maybeThunk(EvalState & state, Env & env) Value * ExprFloat::maybeThunk(EvalState & state, Env & env)
{ {
nrAvoided++; state.nrAvoided++;
return &v; return &v;
} }
Value * ExprPath::maybeThunk(EvalState & state, Env & env) Value * ExprPath::maybeThunk(EvalState & state, Env & env)
{ {
nrAvoided++; state.nrAvoided++;
return &v; return &v;
} }
@ -1021,38 +1066,23 @@ void EvalState::evalFile(const Path & path_, Value & v, bool mustBeTrivial)
return; return;
} }
Path path2 = resolveExprPath(path); Path resolvedPath = resolveExprPath(path);
if ((i = fileEvalCache.find(path2)) != fileEvalCache.end()) { if ((i = fileEvalCache.find(resolvedPath)) != fileEvalCache.end()) {
v = i->second; v = i->second;
return; return;
} }
printTalkative("evaluating file '%1%'", path2); printTalkative("evaluating file '%1%'", resolvedPath);
Expr * e = nullptr; Expr * e = nullptr;
auto j = fileParseCache.find(path2); auto j = fileParseCache.find(resolvedPath);
if (j != fileParseCache.end()) if (j != fileParseCache.end())
e = j->second; e = j->second;
if (!e) if (!e)
e = parseExprFromFile(checkSourcePath(path2)); e = parseExprFromFile(checkSourcePath(resolvedPath));
fileParseCache[path2] = e; cacheFile(path, resolvedPath, e, v, mustBeTrivial);
try {
// Enforce that 'flake.nix' is a direct attrset, not a
// computation.
if (mustBeTrivial &&
!(dynamic_cast<ExprAttrs *>(e)))
throw Error("file '%s' must be an attribute set", path);
eval(e, v);
} catch (Error & e) {
addErrorTrace(e, "while evaluating the file '%1%':", path2);
throw;
}
fileEvalCache[path2] = v;
if (path != path2) fileEvalCache[path] = v;
} }
@ -1063,6 +1093,32 @@ void EvalState::resetFileCache()
} }
void EvalState::cacheFile(
const Path & path,
const Path & resolvedPath,
Expr * e,
Value & v,
bool mustBeTrivial)
{
fileParseCache[resolvedPath] = e;
try {
// Enforce that 'flake.nix' is a direct attrset, not a
// computation.
if (mustBeTrivial &&
!(dynamic_cast<ExprAttrs *>(e)))
throw EvalError("file '%s' must be an attribute set", path);
eval(e, v);
} catch (Error & e) {
addErrorTrace(e, "while evaluating the file '%1%':", resolvedPath);
throw;
}
fileEvalCache[resolvedPath] = v;
if (path != resolvedPath) fileEvalCache[path] = v;
}
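`cacheFile` stores the parsed expression and the evaluated value under the resolved path, and additionally stores the value under the original path when it differs from the resolved one, so a later `evalFile` on either spelling hits the cache. A standalone sketch of that aliasing step (a `std::map` stands in for `fileEvalCache`; the paths are made up):

```cpp
#include <iostream>
#include <map>
#include <string>

int main()
{
    std::map<std::string, std::string> fileEvalCache;

    std::string path = "./default.nix";            // hypothetical original spelling
    std::string resolvedPath = "/src/default.nix"; // hypothetical resolved path
    std::string value = "<evaluated value>";

    fileEvalCache[resolvedPath] = value;
    if (path != resolvedPath) fileEvalCache[path] = value;  // alias the original key

    std::cout << fileEvalCache.size() << "\n";     // prints 2
}
```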
void EvalState::eval(Expr * e, Value & v) void EvalState::eval(Expr * e, Value & v)
{ {
e->eval(*this, baseEnv, v); e->eval(*this, baseEnv, v);
@ -1144,7 +1200,7 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
/* The recursive attributes are evaluated in the new /* The recursive attributes are evaluated in the new
environment, while the inherited attributes are evaluated environment, while the inherited attributes are evaluated
in the original environment. */ in the original environment. */
size_t displ = 0; Displacement displ = 0;
for (auto & i : attrs) { for (auto & i : attrs) {
Value * vAttr; Value * vAttr;
if (hasOverrides && !i.second.inherited) { if (hasOverrides && !i.second.inherited) {
@ -1153,7 +1209,7 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
} else } else
vAttr = i.second.e->maybeThunk(state, i.second.inherited ? env : env2); vAttr = i.second.e->maybeThunk(state, i.second.inherited ? env : env2);
env2.values[displ++] = vAttr; env2.values[displ++] = vAttr;
v.attrs->push_back(Attr(i.first, vAttr, &i.second.pos)); v.attrs->push_back(Attr(i.first, vAttr, ptr(&i.second.pos)));
} }
/* If the rec contains an attribute called `__overrides', then /* If the rec contains an attribute called `__overrides', then
@ -1185,7 +1241,7 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
else else
for (auto & i : attrs) for (auto & i : attrs)
v.attrs->push_back(Attr(i.first, i.second.e->maybeThunk(state, env), &i.second.pos)); v.attrs->push_back(Attr(i.first, i.second.e->maybeThunk(state, env), ptr(&i.second.pos)));
/* Dynamic attrs apply *after* rec and __overrides. */ /* Dynamic attrs apply *after* rec and __overrides. */
for (auto & i : dynamicAttrs) { for (auto & i : dynamicAttrs) {
@ -1203,11 +1259,11 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
i.valueExpr->setName(nameSym); i.valueExpr->setName(nameSym);
/* Keep sorted order so find can catch duplicates */ /* Keep sorted order so find can catch duplicates */
v.attrs->push_back(Attr(nameSym, i.valueExpr->maybeThunk(state, *dynamicEnv), &i.pos)); v.attrs->push_back(Attr(nameSym, i.valueExpr->maybeThunk(state, *dynamicEnv), ptr(&i.pos)));
v.attrs->sort(); // FIXME: inefficient v.attrs->sort(); // FIXME: inefficient
} }
v.attrs->pos = &pos; v.attrs->pos = ptr(&pos);
} }
@ -1221,7 +1277,7 @@ void ExprLet::eval(EvalState & state, Env & env, Value & v)
/* The recursive attributes are evaluated in the new environment, /* The recursive attributes are evaluated in the new environment,
while the inherited attributes are evaluated in the original while the inherited attributes are evaluated in the original
environment. */ environment. */
size_t displ = 0; Displacement displ = 0;
for (auto & i : attrs->attrs) for (auto & i : attrs->attrs)
env2.values[displ++] = i.second.e->maybeThunk(state, i.second.inherited ? env : env2); env2.values[displ++] = i.second.e->maybeThunk(state, i.second.inherited ? env : env2);
@ -1262,12 +1318,10 @@ static string showAttrPath(EvalState & state, Env & env, const AttrPath & attrPa
} }
unsigned long nrLookups = 0;
void ExprSelect::eval(EvalState & state, Env & env, Value & v) void ExprSelect::eval(EvalState & state, Env & env, Value & v)
{ {
Value vTmp; Value vTmp;
Pos * pos2 = 0; ptr<Pos> pos2(&noPos);
Value * vAttrs = &vTmp; Value * vAttrs = &vTmp;
e->eval(state, env, vTmp); e->eval(state, env, vTmp);
@ -1275,7 +1329,7 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v)
try { try {
for (auto & i : attrPath) { for (auto & i : attrPath) {
nrLookups++; state.nrLookups++;
Bindings::iterator j; Bindings::iterator j;
Symbol name = getName(i, state, env); Symbol name = getName(i, state, env);
if (def) { if (def) {
@ -1293,13 +1347,13 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v)
} }
vAttrs = j->value; vAttrs = j->value;
pos2 = j->pos; pos2 = j->pos;
if (state.countCalls && pos2) state.attrSelects[*pos2]++; if (state.countCalls) state.attrSelects[*pos2]++;
} }
state.forceValue(*vAttrs, ( pos2 != NULL ? *pos2 : this->pos ) ); state.forceValue(*vAttrs, (*pos2 != noPos ? *pos2 : this->pos ) );
} catch (Error & e) { } catch (Error & e) {
if (pos2 && pos2->file != state.sDerivationNix) if (*pos2 != noPos && pos2->file != state.sDerivationNix)
addErrorTrace(e, *pos2, "while evaluating the attribute '%1%'", addErrorTrace(e, *pos2, "while evaluating the attribute '%1%'",
showAttrPath(state, env, attrPath)); showAttrPath(state, env, attrPath));
throw; throw;
@ -1340,160 +1394,183 @@ void ExprLambda::eval(EvalState & state, Env & env, Value & v)
} }
void ExprApp::eval(EvalState & state, Env & env, Value & v) void EvalState::callFunction(Value & fun, size_t nrArgs, Value * * args, Value & vRes, const Pos & pos)
{
/* FIXME: vFun prevents GCC from doing tail call optimisation. */
Value vFun;
e1->eval(state, env, vFun);
state.callFunction(vFun, *(e2->maybeThunk(state, env)), v, pos);
}
void EvalState::callPrimOp(Value & fun, Value & arg, Value & v, const Pos & pos)
{
/* Figure out the number of arguments still needed. */
size_t argsDone = 0;
Value * primOp = &fun;
while (primOp->isPrimOpApp()) {
argsDone++;
primOp = primOp->primOpApp.left;
}
assert(primOp->isPrimOp());
auto arity = primOp->primOp->arity;
auto argsLeft = arity - argsDone;
if (argsLeft == 1) {
/* We have all the arguments, so call the primop. */
/* Put all the arguments in an array. */
Value * vArgs[arity];
auto n = arity - 1;
vArgs[n--] = &arg;
for (Value * arg = &fun; arg->isPrimOpApp(); arg = arg->primOpApp.left)
vArgs[n--] = arg->primOpApp.right;
/* And call the primop. */
nrPrimOpCalls++;
if (countCalls) primOpCalls[primOp->primOp->name]++;
primOp->primOp->fun(*this, pos, vArgs, v);
} else {
Value * fun2 = allocValue();
*fun2 = fun;
v.mkPrimOpApp(fun2, &arg);
}
}
void EvalState::callFunction(Value & fun, Value & arg, Value & v, const Pos & pos)
{ {
auto trace = evalSettings.traceFunctionCalls ? std::make_unique<FunctionCallTrace>(pos) : nullptr; auto trace = evalSettings.traceFunctionCalls ? std::make_unique<FunctionCallTrace>(pos) : nullptr;
forceValue(fun, pos); forceValue(fun, pos);
if (fun.isPrimOp() || fun.isPrimOpApp()) { Value vCur(fun);
callPrimOp(fun, arg, v, pos);
return;
}
if (fun.type() == nAttrs) { auto makeAppChain = [&]()
auto found = fun.attrs->find(sFunctor); {
if (found != fun.attrs->end()) { vRes = vCur;
/* fun may be allocated on the stack of the calling function, for (size_t i = 0; i < nrArgs; ++i) {
* but for functors we may keep a reference, so heap-allocate auto fun2 = allocValue();
* a copy and use that instead. *fun2 = vRes;
*/ vRes.mkPrimOpApp(fun2, args[i]);
auto & fun2 = *allocValue(); }
fun2 = fun; };
/* !!! Should we use the attr pos here? */
Value v2;
callFunction(*found->value, fun2, v2, pos);
return callFunction(v2, arg, v, pos);
}
}
if (!fun.isLambda()) { Attr * functor;
throwTypeError(
pos,
"attempt to call something which is not a function but %1%",
showType(fun).c_str(),
fakeEnv(1), 0);
}
ExprLambda & lambda(*fun.lambda.fun); while (nrArgs > 0) {
auto size = if (vCur.isLambda()) {
(lambda.arg.empty() ? 0 : 1) +
(lambda.matchAttrs ? lambda.formals->formals.size() : 0);
Env & env2(allocEnv(size));
env2.up = fun.lambda.env;
size_t displ = 0; ExprLambda & lambda(*vCur.lambda.fun);
if (!lambda.matchAttrs){ auto size =
env2.values[displ++] = &arg; (lambda.arg.empty() ? 0 : 1) +
} (lambda.hasFormals() ? lambda.formals->formals.size() : 0);
else { Env & env2(allocEnv(size));
forceAttrs(arg, pos); env2.up = vCur.lambda.env;
if (!lambda.arg.empty()) Displacement displ = 0;
env2.values[displ++] = &arg;
/* For each formal argument, get the actual argument. If if (!lambda.hasFormals())
there is no matching actual argument but the formal env2.values[displ++] = args[0];
argument has a default, use the default. */ else {
size_t attrsUsed = 0; forceAttrs(*args[0], pos);
for (auto & i : lambda.formals->formals) {
Bindings::iterator j = arg.attrs->find(i.name); if (!lambda.arg.empty())
if (j == arg.attrs->end()) { env2.values[displ++] = args[0];
if (!i.def)
throwTypeError( /* For each formal argument, get the actual argument. If
pos, there is no matching actual argument but the formal
"%1% called without required argument '%2%'", argument has a default, use the default. */
lambda, size_t attrsUsed = 0;
i.name, for (auto & i : lambda.formals->formals) {
*fun.lambda.env, &lambda); auto j = args[0]->attrs->get(i.name);
env2.values[displ++] = i.def->maybeThunk(*this, env2); if (!j) {
if (!i.def) throwTypeError(pos, "%1% called without required argument '%2%'",
lambda, i.name, *fun.lambda.env, &lambda);
env2.values[displ++] = i.def->maybeThunk(*this, env2);
} else {
attrsUsed++;
env2.values[displ++] = j->value;
}
}
/* Check that each actual argument is listed as a formal
argument (unless the attribute match specifies a `...'). */
if (!lambda.formals->ellipsis && attrsUsed != args[0]->attrs->size()) {
/* Nope, so show the first unexpected argument to the
user. */
for (auto & i : *args[0]->attrs)
if (lambda.formals->argNames.find(i.name) == lambda.formals->argNames.end())
throwTypeError(pos, "%1% called with unexpected argument '%2%'", lambda, i.name);
abort(); // can't happen
}
}
nrFunctionCalls++;
if (countCalls) incrFunctionCall(&lambda);
/* Evaluate the body. */
try {
lambda.body->eval(*this, env2, vCur);
} catch (Error & e) {
if (loggerSettings.showTrace.get()) {
addErrorTrace(e, lambda.pos, "while evaluating %s",
(lambda.name.set()
? "'" + (string) lambda.name + "'"
: "anonymous lambda"));
addErrorTrace(e, pos, "from call site%s", "");
}
throw;
}
nrArgs--;
args += 1;
}
else if (vCur.isPrimOp()) {
size_t argsLeft = vCur.primOp->arity;
if (nrArgs < argsLeft) {
/* We don't have enough arguments, so create a tPrimOpApp chain. */
makeAppChain();
return;
} else { } else {
attrsUsed++; /* We have all the arguments, so call the primop. */
env2.values[displ++] = j->value; nrPrimOpCalls++;
if (countCalls) primOpCalls[vCur.primOp->name]++;
vCur.primOp->fun(*this, pos, args, vCur);
nrArgs -= argsLeft;
args += argsLeft;
} }
} }
else if (vCur.isPrimOpApp()) {
/* Figure out the number of arguments still needed. */
size_t argsDone = 0;
Value * primOp = &vCur;
while (primOp->isPrimOpApp()) {
argsDone++;
primOp = primOp->primOpApp.left;
}
assert(primOp->isPrimOp());
auto arity = primOp->primOp->arity;
auto argsLeft = arity - argsDone;
/* Check that each actual argument is listed as a formal if (nrArgs < argsLeft) {
argument (unless the attribute match specifies a `...'). */ /* We still don't have enough arguments, so extend the tPrimOpApp chain. */
if (!lambda.formals->ellipsis && attrsUsed != arg.attrs->size()) { makeAppChain();
/* Nope, so show the first unexpected argument to the return;
user. */ } else {
for (auto & i : *arg.attrs) /* We have all the arguments, so call the primop with
if (lambda.formals->argNames.find(i.name) == lambda.formals->argNames.end()) the previous and new arguments. */
throwTypeError(pos,
"%1% called with unexpected argument '%2%'", Value * vArgs[arity];
lambda, auto n = argsDone;
i.name, for (Value * arg = &vCur; arg->isPrimOpApp(); arg = arg->primOpApp.left)
*fun.lambda.env, &lambda); vArgs[--n] = arg->primOpApp.right;
abort(); // can't happen
for (size_t i = 0; i < argsLeft; ++i)
vArgs[argsDone + i] = args[i];
nrPrimOpCalls++;
if (countCalls) primOpCalls[primOp->primOp->name]++;
primOp->primOp->fun(*this, pos, vArgs, vCur);
nrArgs -= argsLeft;
args += argsLeft;
}
} }
else if (vCur.type() == nAttrs && (functor = vCur.attrs->get(sFunctor))) {
/* 'vCur' may be allocated on the stack of the calling
function, but for functors we may keep a reference, so
heap-allocate a copy and use that instead. */
Value * args2[] = {allocValue(), args[0]};
*args2[0] = vCur;
/* !!! Should we use the attr pos here? */
callFunction(*functor->value, 2, args2, vCur, pos);
nrArgs--;
args++;
}
else
throwTypeError(pos, "attempt to call something which is not a function but %1%", vCur);
} }
nrFunctionCalls++; vRes = vCur;
if (countCalls) incrFunctionCall(&lambda); }
/* Evaluate the body. This is conditional on showTrace, because
catching exceptions makes this function not tail-recursive. */ void ExprCall::eval(EvalState & state, Env & env, Value & v)
if (loggerSettings.showTrace.get()) {
try { Value vFun;
lambda.body->eval(*this, env2, v); fun->eval(state, env, vFun);
} catch (Error & e) {
addErrorTrace(e, lambda.pos, "while evaluating %s", Value * vArgs[args.size()];
(lambda.name.set() for (size_t i = 0; i < args.size(); ++i)
? "'" + (string) lambda.name + "'" vArgs[i] = args[i]->maybeThunk(state, env);
: "anonymous lambda"));
addErrorTrace(e, pos, "from call site%s", ""); state.callFunction(vFun, args.size(), vArgs, v, pos);
throw;
}
else
fun.lambda.fun->body->eval(*this, env2, v);
} }
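The rewrite above replaces the unary `ExprApp`/`callPrimOp` pair with a single n-ary `callFunction`: `ExprCall` thunks every argument once, and the loop consumes as many arguments as each lambda or primop needs, building a `tPrimOpApp` chain only for genuine partial applications. A hedged usage sketch, assuming the Nix source tree and placeholder values for `state`, `vFun` and the arguments:

```cpp
#include "eval.hh"

using namespace nix;

void applyTwo(EvalState & state, Value & vFun, Value & arg1, Value & arg2, Value & vRes)
{
    Value * args[] = {&arg1, &arg2};
    // One n-ary call: callFunction consumes both arguments, currying only if
    // vFun turns out to need more than two.
    state.callFunction(vFun, 2, args, vRes, noPos);
}
```

The old unary overload survives as an inline wrapper in `eval.hh` (see the header diff below), so existing call sites such as `callFlake` keep working unchanged.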
@ -1519,7 +1596,7 @@ void EvalState::autoCallFunction(Bindings & args, Value & fun, Value & res)
} }
} }
if (!fun.isLambda() || !fun.lambda.fun->matchAttrs) { if (!fun.isLambda() || !fun.lambda.fun->hasFormals()) {
res = fun; res = fun;
return; return;
} }
@ -1722,7 +1799,6 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v)
and none of the strings are allowed to have contexts. */ and none of the strings are allowed to have contexts. */
if (first) { if (first) {
firstType = vTmp.type(); firstType = vTmp.type();
first = false;
} }
if (firstType == nInt) { if (firstType == nInt) {
@ -1744,7 +1820,12 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v)
} else } else
throwEvalError(pos, "cannot add %1% to a float", showType(vTmp), env, this); throwEvalError(pos, "cannot add %1% to a float", showType(vTmp), env, this);
} else } else
s << state.coerceToString(pos, vTmp, context, false, firstType == nString); /* skip canonization of first path, which would only be not
canonized in the first place if it's coming from a ./${foo} type
path */
s << state.coerceToString(pos, vTmp, context, false, firstType == nString, !first);
first = false;
} }
if (firstType == nInt) if (firstType == nInt)
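`coerceToString` gains a `canonicalizePath` flag, and string concatenation passes `!first` so that only the first path segment keeps its original, non-canonicalised spelling (the `./${foo}` case described in the comment above). A hedged sketch of calling the extended signature, assuming the Nix source tree; the wrapper name is invented:

```cpp
#include "eval.hh"

using namespace nix;

std::string coerceHead(EvalState & state, const Pos & pos, Value & v, PathSet & context)
{
    // canonicalizePath = false keeps a "./"-style relative spelling intact for
    // the head segment of a path interpolation.
    return state.coerceToString(pos, v, context,
        /* coerceMore */ false, /* copyToStore */ false, /* canonicalizePath */ false);
}
```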
@ -1763,7 +1844,7 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v)
void ExprPos::eval(EvalState & state, Env & env, Value & v) void ExprPos::eval(EvalState & state, Env & env, Value & v)
{ {
state.mkPos(v, &pos); state.mkPos(v, ptr(&pos));
} }
@ -1935,7 +2016,7 @@ std::optional<string> EvalState::tryAttrsToString(const Pos & pos, Value & v,
} }
string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context, string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context,
bool coerceMore, bool copyToStore) bool coerceMore, bool copyToStore, bool canonicalizePath)
{ {
forceValue(v, pos); forceValue(v, pos);
@ -1947,7 +2028,7 @@ string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context,
} }
if (v.type() == nPath) { if (v.type() == nPath) {
Path path(canonPath(v.path)); Path path(canonicalizePath ? canonPath(v.path) : v.path);
return copyToStore ? copyPathToStore(context, path) : path; return copyToStore ? copyPathToStore(context, path) : path;
} }
@ -2010,6 +2091,7 @@ string EvalState::copyPathToStore(PathSet & context, const Path & path)
? store->computeStorePathForPath(std::string(baseNameOf(path)), checkSourcePath(path)).first ? store->computeStorePathForPath(std::string(baseNameOf(path)), checkSourcePath(path)).first
: store->addToStore(std::string(baseNameOf(path)), checkSourcePath(path), FileIngestionMethod::Recursive, htSHA256, defaultPathFilter, repair); : store->addToStore(std::string(baseNameOf(path)), checkSourcePath(path), FileIngestionMethod::Recursive, htSHA256, defaultPathFilter, repair);
dstPath = store->printStorePath(p); dstPath = store->printStorePath(p);
allowPath(p);
srcToStore.insert_or_assign(path, std::move(p)); srcToStore.insert_or_assign(path, std::move(p));
printMsg(lvlChatty, "copied source '%1%' -> '%2%'", path, dstPath); printMsg(lvlChatty, "copied source '%1%' -> '%2%'", path, dstPath);
} }
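`copyPathToStore` now calls `allowPath(p)` on the freshly added store path, so restricted and pure evaluation can read back what was just copied. The standalone sketch below only illustrates the allow-list idea behind `allowPath`/`checkSourcePath` (a path is accessible when it equals or sits below an allowed entry); Nix's real check additionally canonicalises and handles symlinks:

```cpp
#include <iostream>
#include <set>
#include <string>

static bool isAllowed(const std::set<std::string> & allowed, const std::string & path)
{
    for (auto & a : allowed)
        if (path == a || path.compare(0, a.size() + 1, a + "/") == 0)
            return true;
    return false;
}

int main()
{
    std::set<std::string> allowedPaths;
    allowedPaths.insert("/nix/store/abc-source");   // hypothetical entry recorded by allowPath()

    std::cout << isAllowed(allowedPaths, "/nix/store/abc-source/flake.nix") << "\n"; // 1
    std::cout << isAllowed(allowedPaths, "/etc/passwd") << "\n";                     // 0
}
```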
View file
@ -5,6 +5,7 @@
#include "nixexpr.hh" #include "nixexpr.hh"
#include "symbol-table.hh" #include "symbol-table.hh"
#include "config.hh" #include "config.hh"
#include "experimental-features.hh"
#include <map> #include <map>
#include <optional> #include <optional>
@ -98,8 +99,14 @@ public:
Value vEmptySet; Value vEmptySet;
/* Store used to materialise .drv files. */
const ref<Store> store; const ref<Store> store;
/* Store used to build stuff. */
const ref<Store> buildStore;
RootValue vCallFlake = nullptr;
RootValue vImportedDrvToDerivation = nullptr;
private: private:
SrcToStore srcToStore; SrcToStore srcToStore;
@ -132,13 +139,31 @@ private:
public: public:
EvalState(const Strings & _searchPath, ref<Store> store); EvalState(
const Strings & _searchPath,
ref<Store> store,
std::shared_ptr<Store> buildStore = nullptr);
~EvalState(); ~EvalState();
void requireExperimentalFeatureOnEvaluation(
const ExperimentalFeature &,
const std::string_view fName,
const Pos & pos
);
void addToSearchPath(const string & s); void addToSearchPath(const string & s);
SearchPath getSearchPath() { return searchPath; } SearchPath getSearchPath() { return searchPath; }
/* Allow access to a path. */
void allowPath(const Path & path);
/* Allow access to a store path. Note that this gets remapped to
the real store path if `store` is a chroot store. */
void allowPath(const StorePath & storePath);
/* Check whether access to a path is allowed and throw an error if
not. Otherwise return the canonicalised path. */
Path checkSourcePath(const Path & path); Path checkSourcePath(const Path & path);
void checkURI(const std::string & uri); void checkURI(const std::string & uri);
@ -167,6 +192,14 @@ public:
trivial (i.e. doesn't require arbitrary computation). */ trivial (i.e. doesn't require arbitrary computation). */
void evalFile(const Path & path, Value & v, bool mustBeTrivial = false); void evalFile(const Path & path, Value & v, bool mustBeTrivial = false);
/* Like `evalFile`, but with an already parsed expression. */
void cacheFile(
const Path & path,
const Path & resolvedPath,
Expr * e,
Value & v,
bool mustBeTrivial = false);
void resetFileCache(); void resetFileCache();
/* Look up a file in the search path. */ /* Look up a file in the search path. */
@ -221,7 +254,8 @@ public:
booleans and lists to a string. If `copyToStore' is set, booleans and lists to a string. If `copyToStore' is set,
referenced paths are copied to the Nix store as a side effect. */ referenced paths are copied to the Nix store as a side effect. */
string coerceToString(const Pos & pos, Value & v, PathSet & context, string coerceToString(const Pos & pos, Value & v, PathSet & context,
bool coerceMore = false, bool copyToStore = true); bool coerceMore = false, bool copyToStore = true,
bool canonicalizePath = true);
string copyPathToStore(PathSet & context, const Path & path); string copyPathToStore(PathSet & context, const Path & path);
@ -247,6 +281,8 @@ private:
Value * addConstant(const string & name, Value & v); Value * addConstant(const string & name, Value & v);
void addConstant(const string & name, Value * v);
Value * addPrimOp(const string & name, Value * addPrimOp(const string & name,
size_t arity, PrimOpFun primOp); size_t arity, PrimOpFun primOp);
@ -286,8 +322,14 @@ public:
bool isFunctor(Value & fun); bool isFunctor(Value & fun);
void callFunction(Value & fun, Value & arg, Value & v, const Pos & pos); // FIXME: use std::span
void callPrimOp(Value & fun, Value & arg, Value & v, const Pos & pos); void callFunction(Value & fun, size_t nrArgs, Value * * args, Value & vRes, const Pos & pos);
void callFunction(Value & fun, Value & arg, Value & vRes, const Pos & pos)
{
Value * args[] = {&arg};
callFunction(fun, 1, args, vRes, pos);
}
/* Automatically call a function for which each argument has a /* Automatically call a function for which each argument has a
default value or has a binding in the `args' map. */ default value or has a binding in the `args' map. */
@ -305,7 +347,7 @@ public:
void mkList(Value & v, size_t length); void mkList(Value & v, size_t length);
void mkAttrs(Value & v, size_t capacity); void mkAttrs(Value & v, size_t capacity);
void mkThunk_(Value & v, Expr * expr); void mkThunk_(Value & v, Expr * expr);
void mkPos(Value & v, Pos * pos); void mkPos(Value & v, ptr<Pos> pos);
void concatLists(Value & v, size_t nrLists, Value * * lists, const Pos & pos); void concatLists(Value & v, size_t nrLists, Value * * lists, const Pos & pos);
@ -320,8 +362,10 @@ private:
unsigned long nrValuesInEnvs = 0; unsigned long nrValuesInEnvs = 0;
unsigned long nrValues = 0; unsigned long nrValues = 0;
unsigned long nrListElems = 0; unsigned long nrListElems = 0;
unsigned long nrLookups = 0;
unsigned long nrAttrsets = 0; unsigned long nrAttrsets = 0;
unsigned long nrAttrsInAttrsets = 0; unsigned long nrAttrsInAttrsets = 0;
unsigned long nrAvoided = 0;
unsigned long nrOpUpdates = 0; unsigned long nrOpUpdates = 0;
unsigned long nrOpUpdateValuesCopied = 0; unsigned long nrOpUpdateValuesCopied = 0;
unsigned long nrListConcats = 0; unsigned long nrListConcats = 0;
@ -343,6 +387,11 @@ private:
friend struct ExprOpUpdate; friend struct ExprOpUpdate;
friend struct ExprOpConcatLists; friend struct ExprOpConcatLists;
friend struct ExprVar;
friend struct ExprString;
friend struct ExprInt;
friend struct ExprFloat;
friend struct ExprPath;
friend struct ExprSelect; friend struct ExprSelect;
friend void prim_getAttr(EvalState & state, const Pos & pos, Value * * args, Value & v); friend void prim_getAttr(EvalState & state, const Pos & pos, Value * * args, Value & v);
friend void prim_match(EvalState & state, const Pos & pos, Value * * args, Value & v); friend void prim_match(EvalState & state, const Pos & pos, Value * * args, Value & v);
View file
@ -1,4 +1,5 @@
#include "flake.hh" #include "flake.hh"
#include "globals.hh"
#include <nlohmann/json.hpp> #include <nlohmann/json.hpp>
@ -22,12 +23,14 @@ static TrustedList readTrustedList()
static void writeTrustedList(const TrustedList & trustedList) static void writeTrustedList(const TrustedList & trustedList)
{ {
writeFile(trustedListPath(), nlohmann::json(trustedList).dump()); auto path = trustedListPath();
createDirs(dirOf(path));
writeFile(path, nlohmann::json(trustedList).dump());
} }
void ConfigFile::apply() void ConfigFile::apply()
{ {
std::set<std::string> whitelist{"bash-prompt", "bash-prompt-suffix"}; std::set<std::string> whitelist{"bash-prompt", "bash-prompt-suffix", "flake-registry"};
for (auto & [name, value] : settings) { for (auto & [name, value] : settings) {
@ -50,21 +53,19 @@ void ConfigFile::apply()
auto trustedList = readTrustedList(); auto trustedList = readTrustedList();
bool trusted = false; bool trusted = false;
if (nix::settings.acceptFlakeConfig){
if (auto saved = get(get(trustedList, name).value_or(std::map<std::string, bool>()), valueS)) { trusted = true;
} else if (auto saved = get(get(trustedList, name).value_or(std::map<std::string, bool>()), valueS)) {
trusted = *saved; trusted = *saved;
warn("Using saved setting for '%s = %s' from ~/.local/share/nix/trusted-settings.json.", name,valueS);
} else { } else {
// FIXME: filter ANSI escapes, newlines, \r, etc. // FIXME: filter ANSI escapes, newlines, \r, etc.
if (std::tolower(logger->ask(fmt("do you want to allow configuration setting '%s' to be set to '" ANSI_RED "%s" ANSI_NORMAL "' (y/N)?", name, valueS)).value_or('n')) != 'y') { if (std::tolower(logger->ask(fmt("do you want to allow configuration setting '%s' to be set to '" ANSI_RED "%s" ANSI_NORMAL "' (y/N)?", name, valueS)).value_or('n')) == 'y') {
if (std::tolower(logger->ask("do you want to permanently mark this value as untrusted (y/N)?").value_or('n')) == 'y') { trusted = true;
trustedList[name][valueS] = false; }
writeTrustedList(trustedList); if (std::tolower(logger->ask(fmt("do you want to permanently mark this value as %s (y/N)?", trusted ? "trusted": "untrusted" )).value_or('n')) == 'y') {
} trustedList[name][valueS] = trusted;
} else { writeTrustedList(trustedList);
if (std::tolower(logger->ask("do you want to permanently mark this value as trusted (y/N)?").value_or('n')) == 'y') {
trustedList[name][valueS] = trusted = true;
writeTrustedList(trustedList);
}
} }
} }
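The rewritten `ConfigFile::apply` decides trust in three steps: `accept-flake-config` wins outright, otherwise a previously saved answer from `trusted-settings.json` is reused (with a warning), otherwise the user is prompted and may persist whichever answer they gave. A standalone sketch of that decision order (prompt handling reduced to booleans; names are illustrative):

```cpp
#include <iostream>
#include <optional>

bool decideTrusted(bool acceptFlakeConfig, std::optional<bool> saved,
    bool promptAnswer, bool persistAnswer, bool & wrotePersisted)
{
    bool trusted = false;
    if (acceptFlakeConfig)
        trusted = true;                    // nix.conf: accept-flake-config = true
    else if (saved)
        trusted = *saved;                  // previously recorded decision
    else {
        if (promptAnswer) trusted = true;  // "allow this setting? (y/N)"
        wrotePersisted = persistAnswer;    // "permanently mark as (un)trusted? (y/N)"
    }
    return trusted;
}

int main()
{
    bool wrote = false;
    std::cout << decideTrusted(false, std::nullopt, true, true, wrote)
              << " " << wrote << "\n";     // prints "1 1"
}
```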
View file
@ -1,4 +1,5 @@
#include "flake.hh" #include "flake.hh"
#include "eval.hh"
#include "lockfile.hh" #include "lockfile.hh"
#include "primops.hh" #include "primops.hh"
#include "eval-inline.hh" #include "eval-inline.hh"
@ -63,8 +64,7 @@ static std::tuple<fetchers::Tree, FlakeRef, FlakeRef> fetchOrSubstituteTree(
debug("got tree '%s' from '%s'", debug("got tree '%s' from '%s'",
state.store->printStorePath(tree.storePath), lockedRef); state.store->printStorePath(tree.storePath), lockedRef);
if (state.allowedPaths) state.allowPath(tree.storePath);
state.allowedPaths->insert(tree.actualPath);
assert(!originalRef.input.getNarHash() || tree.storePath == originalRef.input.computeStorePath(*state.store)); assert(!originalRef.input.getNarHash() || tree.storePath == originalRef.input.computeStorePath(*state.store));
@ -88,10 +88,12 @@ static void expectType(EvalState & state, ValueType type,
} }
static std::map<FlakeId, FlakeInput> parseFlakeInputs( static std::map<FlakeId, FlakeInput> parseFlakeInputs(
EvalState & state, Value * value, const Pos & pos); EvalState & state, Value * value, const Pos & pos,
const std::optional<Path> & baseDir);
static FlakeInput parseFlakeInput(EvalState & state, static FlakeInput parseFlakeInput(EvalState & state,
const std::string & inputName, Value * value, const Pos & pos) const std::string & inputName, Value * value, const Pos & pos,
const std::optional<Path> & baseDir)
{ {
expectType(state, nAttrs, *value, pos); expectType(state, nAttrs, *value, pos);
@ -115,7 +117,7 @@ static FlakeInput parseFlakeInput(EvalState & state,
expectType(state, nBool, *attr.value, *attr.pos); expectType(state, nBool, *attr.value, *attr.pos);
input.isFlake = attr.value->boolean; input.isFlake = attr.value->boolean;
} else if (attr.name == sInputs) { } else if (attr.name == sInputs) {
input.overrides = parseFlakeInputs(state, attr.value, *attr.pos); input.overrides = parseFlakeInputs(state, attr.value, *attr.pos, baseDir);
} else if (attr.name == sFollows) { } else if (attr.name == sFollows) {
expectType(state, nString, *attr.value, *attr.pos); expectType(state, nString, *attr.value, *attr.pos);
input.follows = parseInputPath(attr.value->string.s); input.follows = parseInputPath(attr.value->string.s);
@ -153,7 +155,7 @@ static FlakeInput parseFlakeInput(EvalState & state,
if (!attrs.empty()) if (!attrs.empty())
throw Error("unexpected flake input attribute '%s', at %s", attrs.begin()->first, pos); throw Error("unexpected flake input attribute '%s', at %s", attrs.begin()->first, pos);
if (url) if (url)
input.ref = parseFlakeRef(*url, {}, true); input.ref = parseFlakeRef(*url, baseDir, true);
} }
if (!input.follows && !input.ref) if (!input.follows && !input.ref)
@ -163,7 +165,8 @@ static FlakeInput parseFlakeInput(EvalState & state,
} }
static std::map<FlakeId, FlakeInput> parseFlakeInputs( static std::map<FlakeId, FlakeInput> parseFlakeInputs(
EvalState & state, Value * value, const Pos & pos) EvalState & state, Value * value, const Pos & pos,
const std::optional<Path> & baseDir)
{ {
std::map<FlakeId, FlakeInput> inputs; std::map<FlakeId, FlakeInput> inputs;
@ -174,7 +177,8 @@ static std::map<FlakeId, FlakeInput> parseFlakeInputs(
parseFlakeInput(state, parseFlakeInput(state,
inputAttr.name, inputAttr.name,
inputAttr.value, inputAttr.value,
*inputAttr.pos)); *inputAttr.pos,
baseDir));
} }
return inputs; return inputs;
@ -190,7 +194,8 @@ static Flake getFlake(
state, originalRef, allowLookup, flakeCache); state, originalRef, allowLookup, flakeCache);
// Guard against symlink attacks. // Guard against symlink attacks.
auto flakeFile = canonPath(sourceInfo.actualPath + "/" + lockedRef.subdir + "/flake.nix"); auto flakeDir = canonPath(sourceInfo.actualPath + "/" + lockedRef.subdir);
auto flakeFile = canonPath(flakeDir + "/flake.nix");
if (!isInDir(flakeFile, sourceInfo.actualPath)) if (!isInDir(flakeFile, sourceInfo.actualPath))
throw Error("'flake.nix' file of flake '%s' escapes from '%s'", throw Error("'flake.nix' file of flake '%s' escapes from '%s'",
lockedRef, state.store->printStorePath(sourceInfo.storePath)); lockedRef, state.store->printStorePath(sourceInfo.storePath));
@ -218,14 +223,14 @@ static Flake getFlake(
auto sInputs = state.symbols.create("inputs"); auto sInputs = state.symbols.create("inputs");
if (auto inputs = vInfo.attrs->get(sInputs)) if (auto inputs = vInfo.attrs->get(sInputs))
flake.inputs = parseFlakeInputs(state, inputs->value, *inputs->pos); flake.inputs = parseFlakeInputs(state, inputs->value, *inputs->pos, flakeDir);
auto sOutputs = state.symbols.create("outputs"); auto sOutputs = state.symbols.create("outputs");
if (auto outputs = vInfo.attrs->get(sOutputs)) { if (auto outputs = vInfo.attrs->get(sOutputs)) {
expectType(state, nFunction, *outputs->value, *outputs->pos); expectType(state, nFunction, *outputs->value, *outputs->pos);
if (outputs->value->isLambda() && outputs->value->lambda.fun->matchAttrs) { if (outputs->value->isLambda() && outputs->value->lambda.fun->hasFormals()) {
for (auto & formal : outputs->value->lambda.fun->formals->formals) { for (auto & formal : outputs->value->lambda.fun->formals->formals) {
if (formal.name != state.sSelf) if (formal.name != state.sSelf)
flake.inputs.emplace(formal.name, FlakeInput { flake.inputs.emplace(formal.name, FlakeInput {
@ -292,11 +297,18 @@ LockedFlake lockFlake(
const FlakeRef & topRef, const FlakeRef & topRef,
const LockFlags & lockFlags) const LockFlags & lockFlags)
{ {
settings.requireExperimentalFeature("flakes"); settings.requireExperimentalFeature(Xp::Flakes);
FlakeCache flakeCache; FlakeCache flakeCache;
auto flake = getFlake(state, topRef, lockFlags.useRegistries, flakeCache); auto useRegistries = lockFlags.useRegistries.value_or(settings.useRegistries);
auto flake = getFlake(state, topRef, useRegistries, flakeCache);
if (lockFlags.applyNixConfig) {
flake.config.apply();
state.store->setOptions();
}
try { try {
@ -317,25 +329,38 @@ LockedFlake lockFlake(
std::vector<FlakeRef> parents; std::vector<FlakeRef> parents;
struct LockParent {
/* The path to this parent. */
InputPath path;
/* Whether we are currently inside a top-level lockfile
(inputs absolute) or subordinate lockfile (inputs
relative). */
bool absolute;
};
std::function<void( std::function<void(
const FlakeInputs & flakeInputs, const FlakeInputs & flakeInputs,
std::shared_ptr<Node> node, std::shared_ptr<Node> node,
const InputPath & inputPathPrefix, const InputPath & inputPathPrefix,
std::shared_ptr<const Node> oldNode)> std::shared_ptr<const Node> oldNode,
const LockParent & parent,
const Path & parentPath)>
computeLocks; computeLocks;
computeLocks = [&]( computeLocks = [&](
const FlakeInputs & flakeInputs, const FlakeInputs & flakeInputs,
std::shared_ptr<Node> node, std::shared_ptr<Node> node,
const InputPath & inputPathPrefix, const InputPath & inputPathPrefix,
std::shared_ptr<const Node> oldNode) std::shared_ptr<const Node> oldNode,
const LockParent & parent,
const Path & parentPath)
{ {
debug("computing lock file node '%s'", printInputPath(inputPathPrefix)); debug("computing lock file node '%s'", printInputPath(inputPathPrefix));
/* Get the overrides (i.e. attributes of the form /* Get the overrides (i.e. attributes of the form
'inputs.nixops.inputs.nixpkgs.url = ...'). */ 'inputs.nixops.inputs.nixpkgs.url = ...'). */
// FIXME: check this for (auto & [id, input] : flakeInputs) {
for (auto & [id, input] : flake.inputs) {
for (auto & [idOverride, inputOverride] : input.overrides) { for (auto & [idOverride, inputOverride] : input.overrides) {
auto inputPath(inputPathPrefix); auto inputPath(inputPathPrefix);
inputPath.push_back(id); inputPath.push_back(id);
@ -359,22 +384,31 @@ LockedFlake lockFlake(
ancestors? */ ancestors? */
auto i = overrides.find(inputPath); auto i = overrides.find(inputPath);
bool hasOverride = i != overrides.end(); bool hasOverride = i != overrides.end();
if (hasOverride) overridesUsed.insert(inputPath); if (hasOverride) {
overridesUsed.insert(inputPath);
// Respect the “flakeness” of the input even if we
// override it
i->second.isFlake = input2.isFlake;
}
auto & input = hasOverride ? i->second : input2; auto & input = hasOverride ? i->second : input2;
/* Resolve 'follows' later (since it may refer to an input /* Resolve 'follows' later (since it may refer to an input
path we haven't processed yet. */ path we haven't processed yet. */
if (input.follows) { if (input.follows) {
InputPath target; InputPath target;
if (hasOverride || input.absolute)
/* 'follows' from an override is relative to the if (parent.absolute && !hasOverride) {
root of the graph. */
target = *input.follows; target = *input.follows;
else { } else {
/* Otherwise, it's relative to the current flake. */ if (hasOverride) {
target = inputPathPrefix; target = inputPathPrefix;
target.pop_back();
} else
target = parent.path;
for (auto & i : *input.follows) target.push_back(i); for (auto & i : *input.follows) target.push_back(i);
} }
debug("input '%s' follows '%s'", inputPathS, printInputPath(target)); debug("input '%s' follows '%s'", inputPathS, printInputPath(target));
node->inputs.insert_or_assign(id, target); node->inputs.insert_or_assign(id, target);
continue; continue;
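The new `LockParent` bookkeeping changes how `follows` paths are resolved: with no override and an absolute (top-level) parent the path is taken as-is; an override is interpreted relative to the overridden input's parent; anything else is relative to the parent input's own path, with the `follows` components appended. A standalone sketch of that rule (an `InputPath` is just a list of attribute names; the inputs in `main` are examples):

```cpp
#include <iostream>
#include <string>
#include <vector>

using InputPath = std::vector<std::string>;

InputPath resolveFollows(bool parentAbsolute, bool hasOverride,
    InputPath prefix, const InputPath & parentPath, const InputPath & follows)
{
    if (parentAbsolute && !hasOverride)
        return follows;                    // already relative to the graph root
    InputPath target;
    if (hasOverride) {
        target = prefix;                   // e.g. {"nixops", "nixpkgs"}
        target.pop_back();                 // overrides are relative to the overridden input's parent
    } else
        target = parentPath;               // subordinate lock files are relative to their parent
    target.insert(target.end(), follows.begin(), follows.end());
    return target;
}

int main()
{
    for (auto & s : resolveFollows(false, false, {}, {"nixops"}, {"nixpkgs"}))
        std::cout << s << " ";             // prints "nixops nixpkgs"
    std::cout << "\n";
}
```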
@ -412,22 +446,18 @@ LockedFlake lockFlake(
update it. */ update it. */
auto lb = lockFlags.inputUpdates.lower_bound(inputPath); auto lb = lockFlags.inputUpdates.lower_bound(inputPath);
auto hasChildUpdate = auto mustRefetch =
lb != lockFlags.inputUpdates.end() lb != lockFlags.inputUpdates.end()
&& lb->size() > inputPath.size() && lb->size() > inputPath.size()
&& std::equal(inputPath.begin(), inputPath.end(), lb->begin()); && std::equal(inputPath.begin(), inputPath.end(), lb->begin());
if (hasChildUpdate) { FlakeInputs fakeInputs;
auto inputFlake = getFlake(
state, oldLock->lockedRef, false, flakeCache); if (!mustRefetch) {
computeLocks(inputFlake.inputs, childNode, inputPath, oldLock);
} else {
/* No need to fetch this flake, we can be /* No need to fetch this flake, we can be
lazy. However there may be new overrides on the lazy. However there may be new overrides on the
inputs of this flake, so we need to check inputs of this flake, so we need to check
those. */ those. */
FlakeInputs fakeInputs;
for (auto & i : oldLock->inputs) { for (auto & i : oldLock->inputs) {
if (auto lockedNode = std::get_if<0>(&i.second)) { if (auto lockedNode = std::get_if<0>(&i.second)) {
fakeInputs.emplace(i.first, FlakeInput { fakeInputs.emplace(i.first, FlakeInput {
@ -435,16 +465,28 @@ LockedFlake lockFlake(
.isFlake = (*lockedNode)->isFlake, .isFlake = (*lockedNode)->isFlake,
}); });
} else if (auto follows = std::get_if<1>(&i.second)) { } else if (auto follows = std::get_if<1>(&i.second)) {
auto o = input.overrides.find(i.first);
// If the override disappeared, we have to refetch the flake,
// since some of the inputs may not be present in the lockfile.
if (o == input.overrides.end()) {
mustRefetch = true;
// There's no point populating the rest of the fake inputs,
// since we'll refetch the flake anyways.
break;
}
fakeInputs.emplace(i.first, FlakeInput { fakeInputs.emplace(i.first, FlakeInput {
.follows = *follows, .follows = *follows,
.absolute = true
}); });
} }
} }
computeLocks(fakeInputs, childNode, inputPath, oldLock);
} }
computeLocks(
mustRefetch
? getFlake(state, oldLock->lockedRef, false, flakeCache).inputs
: fakeInputs,
childNode, inputPath, oldLock, parent, parentPath);
} else { } else {
/* We need to create a new lock file entry. So fetch /* We need to create a new lock file entry. So fetch
this input. */ this input. */
@ -454,7 +496,15 @@ LockedFlake lockFlake(
throw Error("cannot update flake input '%s' in pure mode", inputPathS); throw Error("cannot update flake input '%s' in pure mode", inputPathS);
if (input.isFlake) { if (input.isFlake) {
auto inputFlake = getFlake(state, *input.ref, lockFlags.useRegistries, flakeCache); Path localPath = parentPath;
FlakeRef localRef = *input.ref;
// If this input is a path, recurse it down.
// This allows us to resolve path inputs relative to the current flake.
if (localRef.input.getType() == "path")
localPath = absPath(*input.ref->input.getSourcePath(), parentPath);
auto inputFlake = getFlake(state, localRef, useRegistries, flakeCache);
/* Note: in case of an --override-input, we use /* Note: in case of an --override-input, we use
the *original* ref (input2.ref) for the the *original* ref (input2.ref) for the
@ -475,6 +525,13 @@ LockedFlake lockFlake(
parents.push_back(*input.ref); parents.push_back(*input.ref);
Finally cleanup([&]() { parents.pop_back(); }); Finally cleanup([&]() { parents.pop_back(); });
// Follows paths from existing inputs in the top-level lockfile are absolute,
// whereas paths in subordinate lockfiles are relative to those lockfiles.
LockParent newParent {
.path = inputPath,
.absolute = oldLock ? true : false
};
/* Recursively process the inputs of this /* Recursively process the inputs of this
flake. Also, unless we already have this flake flake. Also, unless we already have this flake
in the top-level lock file, use this flake's in the top-level lock file, use this flake's
@ -484,12 +541,13 @@ LockedFlake lockFlake(
oldLock oldLock
? std::dynamic_pointer_cast<const Node>(oldLock) ? std::dynamic_pointer_cast<const Node>(oldLock)
: LockFile::read( : LockFile::read(
inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root); inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root,
newParent, localPath);
} }
else { else {
auto [sourceInfo, resolvedRef, lockedRef] = fetchOrSubstituteTree( auto [sourceInfo, resolvedRef, lockedRef] = fetchOrSubstituteTree(
state, *input.ref, lockFlags.useRegistries, flakeCache); state, *input.ref, useRegistries, flakeCache);
node->inputs.insert_or_assign(id, node->inputs.insert_or_assign(id,
std::make_shared<LockedNode>(lockedRef, *input.ref, false)); std::make_shared<LockedNode>(lockedRef, *input.ref, false));
} }
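In the `input.isFlake` branch above, a `path:` input is now resolved against `parentPath` via `absPath(*input.ref->input.getSourcePath(), parentPath)`, so relative path inputs are interpreted relative to the flake that declares them rather than the current working directory. A standalone illustration of that resolution using `std::filesystem` in place of Nix's `absPath` (both paths are made up):

```cpp
#include <filesystem>
#include <iostream>

int main()
{
    std::filesystem::path parentPath = "/example/flakes/app"; // hypothetical parent flake dir
    std::filesystem::path inputRef   = "../common";           // hypothetical relative path input
    // In spirit: absPath(*input.ref->input.getSourcePath(), parentPath)
    auto localPath = (parentPath / inputRef).lexically_normal();
    std::cout << localPath << "\n";   // prints "/example/flakes/common"
}
```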
@ -502,9 +560,17 @@ LockedFlake lockFlake(
} }
}; };
LockParent parent {
.path = {},
.absolute = true
};
// Bring in the current ref for relative path resolution if we have it
auto parentPath = canonPath(flake.sourceInfo->actualPath + "/" + flake.lockedRef.subdir);
computeLocks( computeLocks(
flake.inputs, newLockFile.root, {}, flake.inputs, newLockFile.root, {},
lockFlags.recreateLockFile ? nullptr : oldLockFile.root); lockFlags.recreateLockFile ? nullptr : oldLockFile.root, parent, parentPath);
for (auto & i : lockFlags.inputOverrides) for (auto & i : lockFlags.inputOverrides)
if (!overridesUsed.count(i.first)) if (!overridesUsed.count(i.first))
@ -554,8 +620,8 @@ LockedFlake lockFlake(
topRef.input.markChangedFile( topRef.input.markChangedFile(
(topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock", (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock",
lockFlags.commitLockFile lockFlags.commitLockFile
? std::optional<std::string>(fmt("%s: %s\n\nFlake input changes:\n\n%s", ? std::optional<std::string>(fmt("%s: %s\n\nFlake lock file changes:\n\n%s",
relPath, lockFileExists ? "Update" : "Add", diff)) relPath, lockFileExists ? "Update" : "Add", filterANSIEscapes(diff, true)))
: std::nullopt); : std::nullopt);
/* Rewriting the lockfile changed the top-level /* Rewriting the lockfile changed the top-level
@ -563,7 +629,7 @@ LockedFlake lockFlake(
also just clear the 'rev' field... */ also just clear the 'rev' field... */
auto prevLockedRef = flake.lockedRef; auto prevLockedRef = flake.lockedRef;
FlakeCache dummyCache; FlakeCache dummyCache;
flake = getFlake(state, topRef, lockFlags.useRegistries, dummyCache); flake = getFlake(state, topRef, useRegistries, dummyCache);
if (lockFlags.commitLockFile && if (lockFlags.commitLockFile &&
flake.lockedRef.input.getRev() && flake.lockedRef.input.getRev() &&
@ -580,8 +646,10 @@ LockedFlake lockFlake(
} }
} else } else
throw Error("cannot write modified lock file of flake '%s' (use '--no-write-lock-file' to ignore)", topRef); throw Error("cannot write modified lock file of flake '%s' (use '--no-write-lock-file' to ignore)", topRef);
} else } else {
warn("not writing modified lock file of flake '%s':\n%s", topRef, chomp(diff)); warn("not writing modified lock file of flake '%s':\n%s", topRef, chomp(diff));
flake.forceDirty = true;
}
} }
return LockedFlake { .flake = std::move(flake), .lockFile = std::move(newLockFile) }; return LockedFlake { .flake = std::move(flake), .lockFile = std::move(newLockFile) };
@ -604,26 +672,32 @@ void callFlake(EvalState & state,
mkString(*vLocks, lockedFlake.lockFile.to_string()); mkString(*vLocks, lockedFlake.lockFile.to_string());
emitTreeAttrs(state, *lockedFlake.flake.sourceInfo, lockedFlake.flake.lockedRef.input, *vRootSrc); emitTreeAttrs(
state,
*lockedFlake.flake.sourceInfo,
lockedFlake.flake.lockedRef.input,
*vRootSrc,
false,
lockedFlake.flake.forceDirty);
mkString(*vRootSubdir, lockedFlake.flake.lockedRef.subdir); mkString(*vRootSubdir, lockedFlake.flake.lockedRef.subdir);
static RootValue vCallFlake = nullptr; if (!state.vCallFlake) {
state.vCallFlake = allocRootValue(state.allocValue());
if (!vCallFlake) {
vCallFlake = allocRootValue(state.allocValue());
state.eval(state.parseExprFromString( state.eval(state.parseExprFromString(
#include "call-flake.nix.gen.hh" #include "call-flake.nix.gen.hh"
, "/"), **vCallFlake); , "/"), **state.vCallFlake);
} }
state.callFunction(**vCallFlake, *vLocks, *vTmp1, noPos); state.callFunction(**state.vCallFlake, *vLocks, *vTmp1, noPos);
state.callFunction(*vTmp1, *vRootSrc, *vTmp2, noPos); state.callFunction(*vTmp1, *vRootSrc, *vTmp2, noPos);
state.callFunction(*vTmp2, *vRootSubdir, vRes, noPos); state.callFunction(*vTmp2, *vRootSubdir, vRes, noPos);
} }
static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v)
{ {
state.requireExperimentalFeatureOnEvaluation(Xp::Flakes, "builtins.getFlake", pos);
auto flakeRefS = state.forceStringNoCtx(*args[0], pos); auto flakeRefS = state.forceStringNoCtx(*args[0], pos);
auto flakeRef = parseFlakeRef(flakeRefS, {}, true); auto flakeRef = parseFlakeRef(flakeRefS, {}, true);
if (evalSettings.pureEval && !flakeRef.input.isImmutable()) if (evalSettings.pureEval && !flakeRef.input.isImmutable())
@ -633,13 +707,13 @@ static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Va
lockFlake(state, flakeRef, lockFlake(state, flakeRef,
LockFlags { LockFlags {
.updateLockFile = false, .updateLockFile = false,
.useRegistries = !evalSettings.pureEval, .useRegistries = !evalSettings.pureEval && settings.useRegistries,
.allowMutable = !evalSettings.pureEval, .allowMutable = !evalSettings.pureEval,
}), }),
v); v);
} }
static RegisterPrimOp r2("__getFlake", 1, prim_getFlake, "flakes"); static RegisterPrimOp r2("__getFlake", 1, prim_getFlake);
} }
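`builtins.getFlake` now gates on the `flakes` experimental feature at evaluation time via the new `requireExperimentalFeatureOnEvaluation`. A hedged sketch of how a primop might use that hook, assuming the Nix source tree; `prim_example` and its body are invented:

```cpp
#include "eval.hh"

using namespace nix;

// Invented example primop: refuses to evaluate unless the 'flakes'
// experimental feature is enabled, mirroring prim_getFlake above.
static void prim_example(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    state.requireExperimentalFeatureOnEvaluation(Xp::Flakes, "builtins.example", pos);
    mkInt(v, 1); // placeholder result
}
```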
@ -649,8 +723,9 @@ Fingerprint LockedFlake::getFingerprint() const
// and we haven't changed it, then it's sufficient to use // and we haven't changed it, then it's sufficient to use
// flake.sourceInfo.storePath for the fingerprint. // flake.sourceInfo.storePath for the fingerprint.
return hashString(htSHA256, return hashString(htSHA256,
fmt("%s;%d;%d;%s", fmt("%s;%s;%d;%d;%s",
flake.sourceInfo->storePath.to_string(), flake.sourceInfo->storePath.to_string(),
flake.lockedRef.subdir,
flake.lockedRef.input.getRevCount().value_or(0), flake.lockedRef.input.getRevCount().value_or(0),
flake.lockedRef.input.getLastModified().value_or(0), flake.lockedRef.input.getLastModified().value_or(0),
lockFile)); lockFile));
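`LockedFlake::getFingerprint` now mixes the locked ref's `subdir` into the hashed string, so two flakes that share a source store path but live in different subdirectories get distinct fingerprints. A standalone sketch of the string that gets hashed (the values are made up):

```cpp
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

std::string fingerprintInput(const std::string & storePath, const std::string & subdir,
    uint64_t revCount, uint64_t lastModified, const std::string & lockFile)
{
    std::ostringstream s;
    // "%s;%s;%d;%d;%s" from the diff: store path, subdir, revCount, lastModified, lock file
    s << storePath << ';' << subdir << ';' << revCount << ';' << lastModified << ';' << lockFile;
    return s.str();   // this is the string that gets hashed with SHA-256
}

int main()
{
    std::cout << fingerprintInput("abc-source", "", 7, 1637000000, "{}") << "\n";
    std::cout << fingerprintInput("abc-source", "subflake", 7, 1637000000, "{}") << "\n";
}
```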
View file
@ -43,7 +43,6 @@ struct FlakeInput
std::optional<FlakeRef> ref; std::optional<FlakeRef> ref;
bool isFlake = true; // true = process flake to get outputs, false = (fetched) static source path bool isFlake = true; // true = process flake to get outputs, false = (fetched) static source path
std::optional<InputPath> follows; std::optional<InputPath> follows;
bool absolute = false; // whether 'follows' is relative to the flake root
FlakeInputs overrides; FlakeInputs overrides;
}; };
@ -59,9 +58,10 @@ struct ConfigFile
/* The contents of a flake.nix file. */ /* The contents of a flake.nix file. */
struct Flake struct Flake
{ {
FlakeRef originalRef; // the original flake specification (by the user) FlakeRef originalRef; // the original flake specification (by the user)
FlakeRef resolvedRef; // registry references and caching resolved to the specific underlying flake FlakeRef resolvedRef; // registry references and caching resolved to the specific underlying flake
FlakeRef lockedRef; // the specific local store result of invoking the fetcher FlakeRef lockedRef; // the specific local store result of invoking the fetcher
bool forceDirty = false; // pretend that 'lockedRef' is dirty
std::optional<std::string> description; std::optional<std::string> description;
std::shared_ptr<const fetchers::Tree> sourceInfo; std::shared_ptr<const fetchers::Tree> sourceInfo;
FlakeInputs inputs; FlakeInputs inputs;
@ -102,7 +102,11 @@ struct LockFlags
/* Whether to use the registries to lookup indirect flake /* Whether to use the registries to lookup indirect flake
references like 'nixpkgs'. */ references like 'nixpkgs'. */
bool useRegistries = true; std::optional<bool> useRegistries = std::nullopt;
/* Whether to apply flake's nixConfig attribute to the configuration */
bool applyNixConfig = false;
/* Whether mutable flake references (i.e. those without a Git /* Whether mutable flake references (i.e. those without a Git
revision or similar) without a corresponding lock are revision or similar) without a corresponding lock are
@ -137,6 +141,8 @@ void emitTreeAttrs(
EvalState & state, EvalState & state,
const fetchers::Tree & tree, const fetchers::Tree & tree,
const fetchers::Input & input, const fetchers::Input & input,
Value & v, bool emptyRevFallback = false); Value & v,
bool emptyRevFallback = false,
bool forceDirty = false);
} }
View file
@ -172,8 +172,12 @@ std::pair<FlakeRef, std::string> parseFlakeRefWithFragment(
auto parsedURL = parseURL(url); auto parsedURL = parseURL(url);
std::string fragment; std::string fragment;
std::swap(fragment, parsedURL.fragment); std::swap(fragment, parsedURL.fragment);
auto input = Input::fromURL(parsedURL);
input.parent = baseDir;
return std::make_pair( return std::make_pair(
FlakeRef(Input::fromURL(parsedURL), get(parsedURL.query, "dir").value_or("")), FlakeRef(std::move(input), get(parsedURL.query, "dir").value_or("")),
fragment); fragment);
} }
} }
Some files were not shown because too many files have changed in this diff.