Merge remote-tracking branch 'upstream/master' into path-info

John Ericson 2021-09-30 22:36:50 +00:00
commit f4f3203aa7
303 changed files with 32784 additions and 3308 deletions


@@ -3,7 +3,7 @@
 - Thanks for your contribution!
 - To remove the stale label, just leave a new comment.
 - _How to find the right people to ping?_ → [`git blame`](https://git-scm.com/docs/git-blame) to the rescue! (or GitHub's history and blame buttons.)
-- You can always ask for help on [our Discourse Forum](https://discourse.nixos.org/) or on the [#nixos IRC channel](https://webchat.freenode.net/#nixos).
+- You can always ask for help on [our Discourse Forum](https://discourse.nixos.org/) or on [Matrix - #nix:nixos.org](https://matrix.to/#/#nix:nixos.org).
 ## Suggestions for PRs


@@ -4,6 +4,7 @@ on:
   push:
 jobs:
   tests:
+    needs: [check_cachix]
     strategy:
       matrix:
         os: [ubuntu-latest, macos-latest]
@@ -13,15 +14,15 @@ jobs:
     - uses: actions/checkout@v2.3.4
       with:
         fetch-depth: 0
-    - uses: cachix/install-nix-action@v13
+    - uses: cachix/install-nix-action@v14
     - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
-    - uses: cachix/cachix-action@v9
+    - uses: cachix/cachix-action@v10
+      if: needs.check_cachix.outputs.secret == 'true'
       with:
         name: '${{ env.CACHIX_NAME }}'
        signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
        authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
-    #- run: nix flake check
-    - run: nix-build -A checks.$(if [[ `uname` = Linux ]]; then echo x86_64-linux; else echo x86_64-darwin; fi)
+    - run: nix-build -A checks.$(nix-instantiate --eval -E '(builtins.currentSystem)')
   check_cachix:
     name: Cachix secret present for installer tests
     runs-on: ubuntu-latest
@@ -44,8 +45,8 @@ jobs:
       with:
         fetch-depth: 0
     - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
-    - uses: cachix/install-nix-action@v13
-    - uses: cachix/cachix-action@v9
+    - uses: cachix/install-nix-action@v14
+    - uses: cachix/cachix-action@v10
       with:
         name: '${{ env.CACHIX_NAME }}'
        signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
@@ -62,7 +63,7 @@ jobs:
     steps:
     - uses: actions/checkout@v2.3.4
     - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
-    - uses: cachix/install-nix-action@v13
+    - uses: cachix/install-nix-action@v14
       with:
         install_url: '${{needs.installer.outputs.installerURL}}'
        install_options: "--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve"

.gitignore

@@ -15,6 +15,7 @@ perl/Makefile.config
 /doc/manual/*.1
 /doc/manual/*.5
 /doc/manual/*.8
+/doc/manual/generated/*
 /doc/manual/nix.json
 /doc/manual/conf-file.json
 /doc/manual/builtins.json
@@ -56,9 +57,6 @@ perl/Makefile.config
 /src/nix-prefetch-url/nix-prefetch-url
-# /src/nix-daemon/
-/src/nix-daemon/nix-daemon
 /src/nix-collect-garbage/nix-collect-garbage
 # /src/nix-channel/
@@ -76,12 +74,12 @@ perl/Makefile.config
 # /tests/
 /tests/test-tmp
 /tests/common.sh
+/tests/dummy
 /tests/result*
 /tests/restricted-innocent
 /tests/shell
 /tests/shell.drv
 /tests/config.nix
+/tests/ca/config.nix
 # /tests/lang/
 /tests/lang/*.out


@@ -12,6 +12,8 @@ makefiles = \
   src/resolve-system-dependencies/local.mk \
   scripts/local.mk \
   misc/bash/local.mk \
+  misc/fish/local.mk \
+  misc/zsh/local.mk \
   misc/systemd/local.mk \
   misc/launchd/local.mk \
   misc/upstart/local.mk \
@@ -31,4 +33,4 @@ endif
 include mk/lib.mk
-GLOBAL_CXXFLAGS += -g -Wall -include config.h -std=c++17
+GLOBAL_CXXFLAGS += -g -Wall -include config.h -std=c++17 -I src


@@ -1,3 +1,4 @@
+HOST_OS = @host_os@
 AR = @AR@
 BDW_GC_LIBS = @BDW_GC_LIBS@
 BOOST_LDFLAGS = @BOOST_LDFLAGS@
@@ -15,7 +16,6 @@ LDFLAGS = @LDFLAGS@
 LIBARCHIVE_LIBS = @LIBARCHIVE_LIBS@
 LIBBROTLI_LIBS = @LIBBROTLI_LIBS@
 LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBLZMA_LIBS = @LIBLZMA_LIBS@
 OPENSSL_LIBS = @OPENSSL_LIBS@
 LIBSECCOMP_LIBS = @LIBSECCOMP_LIBS@
 PACKAGE_NAME = @PACKAGE_NAME@


@@ -28,7 +28,8 @@ build nix from source with nix-build or how to get a development environment.
 - [Nix manual](https://nixos.org/nix/manual)
 - [Nix jobsets on hydra.nixos.org](https://hydra.nixos.org/project/nix)
 - [NixOS Discourse](https://discourse.nixos.org/)
-- [IRC - #nixos on freenode.net](irc://irc.freenode.net/#nixos)
+- [Matrix - #nix:nixos.org](https://matrix.to/#/#nix:nixos.org)
+- [IRC - #nixos on libera.chat](irc://irc.libera.chat/#nixos)
 ## License


@@ -0,0 +1,42 @@
diff --git a/pthread_stop_world.c b/pthread_stop_world.c
index 1cee6a0b..46c3acd9 100644
--- a/pthread_stop_world.c
+++ b/pthread_stop_world.c
@@ -674,6 +674,8 @@ GC_INNER void GC_push_all_stacks(void)
struct GC_traced_stack_sect_s *traced_stack_sect;
pthread_t self = pthread_self();
word total_size = 0;
+ size_t stack_limit;
+ pthread_attr_t pattr;
if (!EXPECT(GC_thr_initialized, TRUE))
GC_thr_init();
@@ -723,6 +725,28 @@ GC_INNER void GC_push_all_stacks(void)
hi = p->altstack + p->altstack_size;
/* FIXME: Need to scan the normal stack too, but how ? */
/* FIXME: Assume stack grows down */
+ } else {
+ if (pthread_getattr_np(p->id, &pattr)) {
+ ABORT("GC_push_all_stacks: pthread_getattr_np failed!");
+ }
+ if (pthread_attr_getstacksize(&pattr, &stack_limit)) {
+ ABORT("GC_push_all_stacks: pthread_attr_getstacksize failed!");
+ }
+ // When a thread goes into a coroutine, we lose its original sp until
+ // control flow returns to the thread.
+ // While in the coroutine, the sp points outside the thread stack,
+ // so we can detect this and push the entire thread stack instead,
+ // as an approximation.
+ // We assume that the coroutine has similarly added its entire stack.
+ // This could be made accurate by cooperating with the application
+ // via new functions and/or callbacks.
+ #ifndef STACK_GROWS_UP
+ if (lo >= hi || lo < hi - stack_limit) { // sp outside stack
+ lo = hi - stack_limit;
+ }
+ #else
+ #error "STACK_GROWS_UP not supported in boost_coroutine2 (as of june 2021), so we don't support it in Nix."
+ #endif
}
GC_push_all_stack_sections(lo, hi, traced_stack_sect);
# ifdef STACK_GROWS_UP

config/config.guess

@@ -1,8 +1,8 @@
 #! /bin/sh
 # Attempt to guess a canonical system name.
-# Copyright 1992-2020 Free Software Foundation, Inc.
+# Copyright 1992-2021 Free Software Foundation, Inc.
-timestamp='2020-11-19'
+timestamp='2021-01-25'
 # This file is free software; you can redistribute it and/or modify it
 # under the terms of the GNU General Public License as published by
@@ -50,7 +50,7 @@ version="\
 GNU config.guess ($timestamp)
 Originally written by Per Bothner.
-Copyright 1992-2020 Free Software Foundation, Inc.
+Copyright 1992-2021 Free Software Foundation, Inc.
 This is free software; see the source for copying conditions. There is NO
 warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -188,10 +188,9 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
     #
     # Note: NetBSD doesn't particularly care about the vendor
     # portion of the name. We always set it to "unknown".
-    sysctl="sysctl -n hw.machine_arch"
     UNAME_MACHINE_ARCH=$( (uname -p 2>/dev/null || \
-        "/sbin/$sysctl" 2>/dev/null || \
-        "/usr/sbin/$sysctl" 2>/dev/null || \
+        /sbin/sysctl -n hw.machine_arch 2>/dev/null || \
+        /usr/sbin/sysctl -n hw.machine_arch 2>/dev/null || \
         echo unknown))
     case "$UNAME_MACHINE_ARCH" in
         aarch64eb) machine=aarch64_be-unknown ;;
@@ -996,6 +995,9 @@ EOF
     k1om:Linux:*:*)
         echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
         exit ;;
+    loongarch32:Linux:*:* | loongarch64:Linux:*:* | loongarchx32:Linux:*:*)
+        echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+        exit ;;
     m32r*:Linux:*:*)
         echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
         exit ;;
@@ -1084,7 +1086,7 @@ EOF
     ppcle:Linux:*:*)
         echo powerpcle-unknown-linux-"$LIBC"
         exit ;;
-    riscv32:Linux:*:* | riscv64:Linux:*:*)
+    riscv32:Linux:*:* | riscv32be:Linux:*:* | riscv64:Linux:*:* | riscv64be:Linux:*:*)
         echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
         exit ;;
     s390:Linux:*:* | s390x:Linux:*:*)
@@ -1480,8 +1482,8 @@ EOF
     i*86:rdos:*:*)
         echo "$UNAME_MACHINE"-pc-rdos
         exit ;;
-    i*86:AROS:*:*)
-        echo "$UNAME_MACHINE"-pc-aros
+    *:AROS:*:*)
+        echo "$UNAME_MACHINE"-unknown-aros
         exit ;;
     x86_64:VMkernel:*:*)
         echo "$UNAME_MACHINE"-unknown-esx

config/config.sub

@@ -1,8 +1,8 @@
 #! /bin/sh
 # Configuration validation subroutine script.
-# Copyright 1992-2020 Free Software Foundation, Inc.
+# Copyright 1992-2021 Free Software Foundation, Inc.
-timestamp='2020-12-02'
+timestamp='2021-01-08'
 # This file is free software; you can redistribute it and/or modify it
 # under the terms of the GNU General Public License as published by
@@ -67,7 +67,7 @@ Report bugs and patches to <config-patches@gnu.org>."
 version="\
 GNU config.sub ($timestamp)
-Copyright 1992-2020 Free Software Foundation, Inc.
+Copyright 1992-2021 Free Software Foundation, Inc.
 This is free software; see the source for copying conditions. There is NO
 warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -1185,6 +1185,7 @@ case $cpu-$vendor in
     | k1om \
     | le32 | le64 \
     | lm32 \
+    | loongarch32 | loongarch64 | loongarchx32 \
     | m32c | m32r | m32rle \
     | m5200 | m68000 | m680[012346]0 | m68360 | m683?2 | m68k \
     | m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x \
@@ -1229,7 +1230,7 @@ case $cpu-$vendor in
     | powerpc | powerpc64 | powerpc64le | powerpcle | powerpcspe \
     | pru \
     | pyramid \
-    | riscv | riscv32 | riscv64 \
+    | riscv | riscv32 | riscv32be | riscv64 | riscv64be \
     | rl78 | romp | rs6000 | rx \
     | s390 | s390x \
     | score \
@@ -1682,11 +1683,14 @@ fi
 # Now, validate our (potentially fixed-up) OS.
 case $os in
-    # Sometimes we do "kernel-abi", so those need to count as OSes.
+    # Sometimes we do "kernel-libc", so those need to count as OSes.
     musl* | newlib* | uclibc*)
         ;;
-    # Likewise for "kernel-libc"
-    eabi | eabihf | gnueabi | gnueabihf)
+    # Likewise for "kernel-abi"
+    eabi* | gnueabi*)
+        ;;
+    # VxWorks passes extra cpu info in the 4th filed.
+    simlinux | simwindows | spe)
         ;;
     # Now accept the basic system types.
     # The portable systems comes first.
@@ -1750,6 +1754,8 @@ case $kernel-$os in
         ;;
     kfreebsd*-gnu* | kopensolaris*-gnu*)
         ;;
+    vxworks-simlinux | vxworks-simwindows | vxworks-spe)
+        ;;
     nto-qnx*)
         ;;
     os2-emx)
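
These two vendored scripts only canonicalise system triples; a quick way to exercise the newly accepted patterns locally (the printed triple is just an illustrative example, not output from this commit):

```bash
# Guess the build machine's triple, then canonicalise one of the CPU names
# (riscv64be) that config.sub newly accepts above.
./config/config.guess            # e.g. x86_64-pc-linux-gnu
./config/config.sub riscv64be-linux
```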


@@ -1,4 +1,4 @@
-AC_INIT(nix, m4_esyscmd([bash -c "echo -n $(cat ./.version)$VERSION_SUFFIX"]))
+AC_INIT([nix],[m4_esyscmd(bash -c "echo -n $(cat ./.version)$VERSION_SUFFIX")])
 AC_CONFIG_MACRO_DIRS([m4])
 AC_CONFIG_SRCDIR(README.md)
 AC_CONFIG_AUX_DIR(config)
@@ -9,8 +9,7 @@ AC_PROG_SED
 AC_CANONICAL_HOST
 AC_MSG_CHECKING([for the canonical Nix system name])
-AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM],
-  [Platform identifier (e.g., `i686-linux').]),
+AC_ARG_WITH(system, AS_HELP_STRING([--with-system=SYSTEM],[Platform identifier (e.g., `i686-linux').]),
   [system=$withval],
   [case "$host_cpu" in
      i*86)
@@ -33,14 +32,6 @@ AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM],
      system="$machine_name-`echo $host_os | "$SED" -e's/@<:@0-9.@:>@*$//g'`";;
    esac])
-sys_name=$(uname -s | tr 'A-Z ' 'a-z_')
-case $sys_name in
-  cygwin*)
-    sys_name=cygwin
-    ;;
-esac
 AC_MSG_RESULT($system)
 AC_SUBST(system)
 AC_DEFINE_UNQUOTED(SYSTEM, ["$system"], [platform identifier ('cpu-os')])
@@ -64,10 +55,12 @@ AC_SYS_LARGEFILE
 # Solaris-specific stuff.
 AC_STRUCT_DIRENT_D_TYPE
-if test "$sys_name" = sunos; then
+case "$host_os" in
+  solaris*)
     # Solaris requires -lsocket -lnsl for network functions
-    LIBS="-lsocket -lnsl $LIBS"
-fi
+    LDFLAGS="-lsocket -lnsl $LDFLAGS"
+    ;;
+esac
 # Check for pubsetbuf.
@@ -127,8 +120,7 @@ NEED_PROG(jq, jq)
 AC_SUBST(coreutils, [$(dirname $(type -p cat))])
-AC_ARG_WITH(store-dir, AC_HELP_STRING([--with-store-dir=PATH],
-  [path of the Nix store (defaults to /nix/store)]),
+AC_ARG_WITH(store-dir, AS_HELP_STRING([--with-store-dir=PATH],[path of the Nix store (defaults to /nix/store)]),
   storedir=$withval, storedir='/nix/store')
 AC_SUBST(storedir)
@@ -152,13 +144,12 @@ int main() {
 }]])], GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=no, GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=yes)
 AC_MSG_RESULT($GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC)
 if test "x$GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC" = xyes; then
-  LIBS="-latomic $LIBS"
+  LDFLAGS="-latomic $LDFLAGS"
 fi
 PKG_PROG_PKG_CONFIG
-AC_ARG_ENABLE(shared, AC_HELP_STRING([--enable-shared],
-  [Build shared libraries for Nix [default=yes]]),
+AC_ARG_ENABLE(shared, AS_HELP_STRING([--enable-shared],[Build shared libraries for Nix [default=yes]]),
   shared=$enableval, shared=yes)
 if test "$shared" = yes; then
   AC_SUBST(BUILD_SHARED_LIBS, 1, [Whether to build shared libraries.])
@@ -172,11 +163,6 @@ fi
 PKG_CHECK_MODULES([OPENSSL], [libcrypto], [CXXFLAGS="$OPENSSL_CFLAGS $CXXFLAGS"])
-# Look for libbz2, a required dependency.
-AC_CHECK_LIB([bz2], [BZ2_bzWriteOpen], [true],
-  [AC_MSG_ERROR([Nix requires libbz2, which is part of bzip2. See https://sourceware.org/bzip2/.])])
-AC_CHECK_HEADERS([bzlib.h], [true],
-  [AC_MSG_ERROR([Nix requires libbz2, which is part of bzip2. See https://sourceware.org/bzip2/.])])
 # Checks for libarchive
 PKG_CHECK_MODULES([LIBARCHIVE], [libarchive >= 3.1.2], [CXXFLAGS="$LIBARCHIVE_CFLAGS $CXXFLAGS"])
 # Workaround until https://github.com/libarchive/libarchive/issues/1446 is fixed
@@ -205,16 +191,6 @@ PKG_CHECK_MODULES([EDITLINE], [libeditline], [CXXFLAGS="$EDITLINE_CFLAGS $CXXFLA
 # Look for libsodium, an optional dependency.
 PKG_CHECK_MODULES([SODIUM], [libsodium], [CXXFLAGS="$SODIUM_CFLAGS $CXXFLAGS"])
-# Look for liblzma, a required dependency.
-PKG_CHECK_MODULES([LIBLZMA], [liblzma], [CXXFLAGS="$LIBLZMA_CFLAGS $CXXFLAGS"])
-AC_CHECK_LIB([lzma], [lzma_stream_encoder_mt],
-  [AC_DEFINE([HAVE_LZMA_MT], [1], [xz multithreaded compression support])])
-# Look for zlib, a required dependency.
-PKG_CHECK_MODULES([ZLIB], [zlib], [CXXFLAGS="$ZLIB_CFLAGS $CXXFLAGS"])
-AC_CHECK_HEADER([zlib.h],[:],[AC_MSG_ERROR([could not find the zlib.h header])])
-LDFLAGS="-lz $LDFLAGS"
 # Look for libbrotli{enc,dec}.
 PKG_CHECK_MODULES([LIBBROTLI], [libbrotlienc libbrotlidec], [CXXFLAGS="$LIBBROTLI_CFLAGS $CXXFLAGS"])
@@ -228,30 +204,32 @@ AC_SUBST(HAVE_LIBCPUID, [$have_libcpuid])
 # Look for libseccomp, required for Linux sandboxing.
-if test "$sys_name" = linux; then
-  AC_ARG_ENABLE([seccomp-sandboxing],
-    AC_HELP_STRING([--disable-seccomp-sandboxing],
-      [Don't build support for seccomp sandboxing (only recommended if your arch doesn't support libseccomp yet!)]
-    ))
+case "$host_os" in
+  linux*)
+    AC_ARG_ENABLE([seccomp-sandboxing],
+      AS_HELP_STRING([--disable-seccomp-sandboxing],[Don't build support for seccomp sandboxing (only recommended if your arch doesn't support libseccomp yet!)
+      ]))
     if test "x$enable_seccomp_sandboxing" != "xno"; then
       PKG_CHECK_MODULES([LIBSECCOMP], [libseccomp],
         [CXXFLAGS="$LIBSECCOMP_CFLAGS $CXXFLAGS"])
      have_seccomp=1
      AC_DEFINE([HAVE_SECCOMP], [1], [Whether seccomp is available and should be used for sandboxing.])
    else
      have_seccomp=
    fi
-else
-  have_seccomp=
-fi
+    ;;
+  *)
+    have_seccomp=
+    ;;
+esac
 AC_SUBST(HAVE_SECCOMP, [$have_seccomp])
 # Look for aws-cpp-sdk-s3.
 AC_LANG_PUSH(C++)
 AC_CHECK_HEADERS([aws/s3/S3Client.h],
-  [AC_DEFINE([ENABLE_S3], [1], [Whether to enable S3 support via aws-sdk-cpp.])
-  enable_s3=1], [enable_s3=])
+  [AC_DEFINE([ENABLE_S3], [1], [Whether to enable S3 support via aws-sdk-cpp.]) enable_s3=1],
+  [AC_DEFINE([ENABLE_S3], [0], [Whether to enable S3 support via aws-sdk-cpp.]) enable_s3=])
 AC_SUBST(ENABLE_S3, [$enable_s3])
 AC_LANG_POP(C++)
@@ -264,8 +242,7 @@ fi
 # Whether to use the Boehm garbage collector.
-AC_ARG_ENABLE(gc, AC_HELP_STRING([--enable-gc],
-  [enable garbage collection in the Nix expression evaluator (requires Boehm GC) [default=yes]]),
+AC_ARG_ENABLE(gc, AS_HELP_STRING([--enable-gc],[enable garbage collection in the Nix expression evaluator (requires Boehm GC) [default=yes]]),
   gc=$enableval, gc=yes)
 if test "$gc" = yes; then
   PKG_CHECK_MODULES([BDW_GC], [bdw-gc])
@@ -279,11 +256,12 @@ PKG_CHECK_MODULES([GTEST], [gtest_main])
 # documentation generation switch
-AC_ARG_ENABLE(doc-gen, AC_HELP_STRING([--disable-doc-gen],
-  [disable documentation generation]),
+AC_ARG_ENABLE(doc-gen, AS_HELP_STRING([--disable-doc-gen],[disable documentation generation]),
   doc_generate=$enableval, doc_generate=yes)
 AC_SUBST(doc_generate)
+# Look for lowdown library.
+PKG_CHECK_MODULES([LOWDOWN], [lowdown >= 0.8.0], [CXXFLAGS="$LOWDOWN_CFLAGS $CXXFLAGS"])
 # Setuid installations.
 AC_CHECK_FUNCS([setresuid setreuid lchown])
@@ -295,24 +273,14 @@ AC_CHECK_FUNCS([strsignal posix_fallocate sysconf])
 # This is needed if bzip2 is a static library, and the Nix libraries
 # are dynamic.
-if test "$(uname)" = "Darwin"; then
+case "${host_os}" in
+  darwin*)
     LDFLAGS="-all_load $LDFLAGS"
-fi
+    ;;
+esac
-# Do we have GNU tar?
-AC_MSG_CHECKING([if you have a recent GNU tar])
-if $tar --version 2> /dev/null | grep -q GNU && tar cvf /dev/null --warning=no-timestamp ./config.log > /dev/null; then
-  AC_MSG_RESULT(yes)
-  tarFlags="--warning=no-timestamp"
-else
-  AC_MSG_RESULT(no)
-fi
-AC_SUBST(tarFlags)
-AC_ARG_WITH(sandbox-shell, AC_HELP_STRING([--with-sandbox-shell=PATH],
-  [path of a statically-linked shell to use as /bin/sh in sandboxes]),
+AC_ARG_WITH(sandbox-shell, AS_HELP_STRING([--with-sandbox-shell=PATH],[path of a statically-linked shell to use as /bin/sh in sandboxes]),
   sandbox_shell=$withval)
 AC_SUBST(sandbox_shell)
@@ -327,6 +295,6 @@ done
 rm -f Makefile.config
-AC_CONFIG_HEADER([config.h])
+AC_CONFIG_HEADERS([config.h])
 AC_CONFIG_FILES([])
 AC_OUTPUT
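
The `AC_HELP_STRING` → `AS_HELP_STRING` conversion and the move from `$sys_name`/`uname` checks to `$host_os` do not change the user-facing options; a typical invocation still looks roughly like this (paths are placeholders, not taken from this commit):

```bash
# Illustrative ./configure call exercising options whose help strings changed above.
./configure \
  --with-store-dir=/nix/store \
  --with-sandbox-shell=/path/to/a/static/sh \
  --enable-gc \
  --enable-shared
```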


@@ -6,9 +6,11 @@ builtins:
 concatStrings (map
   (name:
     let builtin = builtins.${name}; in
-    " - `builtins.${name}` " + concatStringsSep " " (map (s: "*${s}*") builtin.args)
-    + " \n\n"
-    + concatStrings (map (s: " ${s}\n") (splitLines builtin.doc)) + "\n\n"
+    "<dt><code>${name} "
+    + concatStringsSep " " (map (s: "<var>${s}</var>") builtin.args)
+    + "</code></dt>"
+    + "<dd>\n\n"
+    + builtin.doc
+    + "\n\n</dd>"
   )
   (attrNames builtins))


@@ -89,7 +89,7 @@ let
 in
 let
-  manpages = processCommand { filename = "nix"; command = "nix"; def = command; };
+  manpages = processCommand { filename = "nix"; command = "nix"; def = builtins.fromJSON command; };
   summary = concatStrings (map (manpage: " - [${manpage.command}](command-ref/new-cli/${manpage.name})\n") manpages);
 in
 (listToAttrs manpages) // { "SUMMARY.md" = summary; }


@@ -1,7 +1,5 @@
 ifeq ($(doc_generate),yes)
-MANUAL_SRCS := $(call rwildcard, $(d)/src, *.md)
 # Generate man pages.
 man-pages := $(foreach n, \
   nix-env.1 nix-build.1 nix-shell.1 nix-store.1 nix-instantiate.1 \
@@ -25,19 +23,19 @@ nix-eval = $(dummy-env) $(bindir)/nix eval --experimental-features nix-command -
 $(d)/%.1: $(d)/src/command-ref/%.md
     @printf "Title: %s\n\n" "$$(basename $@ .1)" > $^.tmp
     @cat $^ >> $^.tmp
-    $(trace-gen) lowdown -sT man $^.tmp -o $@
+    $(trace-gen) lowdown -sT man -M section=1 $^.tmp -o $@
     @rm $^.tmp
 $(d)/%.8: $(d)/src/command-ref/%.md
     @printf "Title: %s\n\n" "$$(basename $@ .8)" > $^.tmp
     @cat $^ >> $^.tmp
-    $(trace-gen) lowdown -sT man $^.tmp -o $@
+    $(trace-gen) lowdown -sT man -M section=8 $^.tmp -o $@
     @rm $^.tmp
 $(d)/nix.conf.5: $(d)/src/command-ref/conf-file.md
     @printf "Title: %s\n\n" "$$(basename $@ .5)" > $^.tmp
     @cat $^ >> $^.tmp
-    $(trace-gen) lowdown -sT man $^.tmp -o $@
+    $(trace-gen) lowdown -sT man -M section=5 $^.tmp -o $@
     @rm $^.tmp
 $(d)/src/SUMMARY.md: $(d)/src/SUMMARY.md.in $(d)/src/command-ref/new-cli
@@ -46,7 +44,7 @@ $(d)/src/SUMMARY.md: $(d)/src/SUMMARY.md.in $(d)/src/command-ref/new-cli
 $(d)/src/command-ref/new-cli: $(d)/nix.json $(d)/generate-manpage.nix $(bindir)/nix
     @rm -rf $@
-    $(trace-gen) $(nix-eval) --write-to $@ --expr 'import doc/manual/generate-manpage.nix (builtins.fromJSON (builtins.readFile $<))'
+    $(trace-gen) $(nix-eval) --write-to $@ --expr 'import doc/manual/generate-manpage.nix (builtins.readFile $<)'
 $(d)/src/command-ref/conf-file.md: $(d)/conf-file.json $(d)/generate-options.nix $(d)/src/command-ref/conf-file-prefix.md $(bindir)/nix
     @cat doc/manual/src/command-ref/conf-file-prefix.md > $@.tmp
@@ -64,6 +62,7 @@ $(d)/conf-file.json: $(bindir)/nix
 $(d)/src/expressions/builtins.md: $(d)/builtins.json $(d)/generate-builtins.nix $(d)/src/expressions/builtins-prefix.md $(bindir)/nix
     @cat doc/manual/src/expressions/builtins-prefix.md > $@.tmp
     $(trace-gen) $(nix-eval) --expr 'import doc/manual/generate-builtins.nix (builtins.fromJSON (builtins.readFile $<))' >> $@.tmp
+    @cat doc/manual/src/expressions/builtins-suffix.md >> $@.tmp
     @mv $@.tmp $@
 $(d)/builtins.json: $(bindir)/nix
@@ -74,17 +73,28 @@ $(d)/builtins.json: $(bindir)/nix
 install: $(docdir)/manual/index.html
 # Generate 'nix' manpages.
-install: $(d)/src/command-ref/new-cli
+install: $(mandir)/man1/nix3-manpages
+man: doc/manual/generated/man1/nix3-manpages
+all: doc/manual/generated/man1/nix3-manpages
+$(mandir)/man1/nix3-manpages: doc/manual/generated/man1/nix3-manpages
+    @mkdir -p $(DESTDIR)$$(dirname $@)
+    $(trace-install) install -m 0644 $$(dirname $<)/* $(DESTDIR)$$(dirname $@)
+doc/manual/generated/man1/nix3-manpages: $(d)/src/command-ref/new-cli
+    @mkdir -p $(DESTDIR)$$(dirname $@)
     $(trace-gen) for i in doc/manual/src/command-ref/new-cli/*.md; do \
         name=$$(basename $$i .md); \
+        tmpFile=$$(mktemp); \
         if [[ $$name = SUMMARY ]]; then continue; fi; \
-        printf "Title: %s\n\n" "$$name" > $$i.tmp; \
-        cat $$i >> $$i.tmp; \
-        lowdown -sT man $$i.tmp -o $(mandir)/man1/$$name.1; \
+        printf "Title: %s\n\n" "$$name" > $$tmpFile; \
+        cat $$i >> $$tmpFile; \
+        lowdown -sT man -M section=1 $$tmpFile -o $(DESTDIR)$$(dirname $@)/$$name.1; \
+        rm $$tmpFile; \
     done
+    @touch $@
-$(docdir)/manual/index.html: $(MANUAL_SRCS) $(d)/book.toml $(d)/custom.css $(d)/src/SUMMARY.md $(d)/src/command-ref/new-cli $(d)/src/command-ref/conf-file.md $(d)/src/expressions/builtins.md
+$(docdir)/manual/index.html: $(MANUAL_SRCS) $(d)/book.toml $(d)/custom.css $(d)/src/SUMMARY.md $(d)/src/command-ref/new-cli $(d)/src/command-ref/conf-file.md $(d)/src/expressions/builtins.md $(call rwildcard, $(d)/src, *.md)
-    $(trace-gen) RUST_LOG=warn mdbook build doc/manual -d $(docdir)/manual
+    $(trace-gen) RUST_LOG=warn mdbook build doc/manual -d $(DESTDIR)$(docdir)/manual
@cp doc/manual/highlight.pack.js $(docdir)/manual/highlight.js
 endif
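
The `nix3-manpages` rule above boils down to running lowdown once per generated Markdown page. A hand-run sketch of one iteration (the page name is a placeholder; any file under `doc/manual/src/command-ref/new-cli/` except `SUMMARY.md` would do):

```bash
# Render one generated CLI page into a section-1 man page, as the rule does.
page=doc/manual/src/command-ref/new-cli/somepage.md   # placeholder name
tmpFile=$(mktemp)
printf "Title: %s\n\n" "$(basename "$page" .md)" > "$tmpFile"
cat "$page" >> "$tmpFile"
lowdown -sT man -M section=1 "$tmpFile" -o "$(basename "$page" .md).1"
rm "$tmpFile"
```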


@@ -70,6 +70,7 @@
   - [Hacking](contributing/hacking.md)
   - [CLI guideline](contributing/cli-guideline.md)
 - [Release Notes](release-notes/release-notes.md)
+  - [Release 2.4 (2021-XX-XX)](release-notes/rl-2.4.md)
   - [Release 2.3 (2019-09-04)](release-notes/rl-2.3.md)
   - [Release 2.2 (2019-01-11)](release-notes/rl-2.2.md)
   - [Release 2.1 (2018-09-02)](release-notes/rl-2.1.md)


@@ -4,13 +4,13 @@ Nix has two relevant settings with regards to how your CPU cores will
 be utilized: `cores` and `max-jobs`. This chapter will talk about what
 they are, how they interact, and their configuration trade-offs.
-  - `max-jobs`
+  - `max-jobs`\
     Dictates how many separate derivations will be built at the same
     time. If you set this to zero, the local machine will do no
     builds. Nix will still substitute from binary caches, and build
     remotely if remote builders are configured.
-  - `cores`
+  - `cores`\
     Suggests how many cores each derivation should use. Similar to
     `make -j`.
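
As a concrete, illustrative pairing of the two settings described above (the `default.nix` argument is just a placeholder):

```bash
# Build up to 4 derivations in parallel, hinting 2 cores to each builder.
# The same values can be set permanently in nix.conf as max-jobs and cores.
nix-build --max-jobs 4 --cores 2 default.nix
```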


@@ -2,45 +2,49 @@
 Most Nix commands interpret the following environment variables:
-  - `IN_NIX_SHELL`
+  - `IN_NIX_SHELL`\
     Indicator that tells if the current environment was set up by
     `nix-shell`. Since Nix 2.0 the values are `"pure"` and `"impure"`
-  - `NIX_PATH`
+  - `NIX_PATH`\
     A colon-separated list of directories used to look up Nix
     expressions enclosed in angle brackets (i.e., `<path>`). For
     instance, the value
         /home/eelco/Dev:/etc/nixos
     will cause Nix to look for paths relative to `/home/eelco/Dev` and
     `/etc/nixos`, in this order. It is also possible to match paths
     against a prefix. For example, the value
         nixpkgs=/home/eelco/Dev/nixpkgs-branch:/etc/nixos
     will cause Nix to search for `<nixpkgs/path>` in
     `/home/eelco/Dev/nixpkgs-branch/path` and `/etc/nixos/nixpkgs/path`.
     If a path in the Nix search path starts with `http://` or
     `https://`, it is interpreted as the URL of a tarball that will be
     downloaded and unpacked to a temporary location. The tarball must
     consist of a single top-level directory. For example, setting
     `NIX_PATH` to
-        nixpkgs=https://github.com/NixOS/nixpkgs/archive/nixos-15.09.tar.gz
-    tells Nix to download the latest revision in the Nixpkgs/NixOS 15.09
-    channel.
-    A following shorthand can be used to refer to the official channels:
-        nixpkgs=channel:nixos-15.09
-    The search path can be extended using the `-I` option, which takes
-    precedence over `NIX_PATH`.
+        nixpkgs=https://github.com/NixOS/nixpkgs/archive/master.tar.gz
+    tells Nix to download and use the current contents of the
+    `master` branch in the `nixpkgs` repository.
+    The URLs of the tarballs from the official nixos.org channels (see
+    [the manual for `nix-channel`](nix-channel.md)) can be abbreviated
+    as `channel:<channel-name>`. For instance, the following two
+    values of `NIX_PATH` are equivalent:
+        nixpkgs=channel:nixos-21.05
+        nixpkgs=https://nixos.org/channels/nixos-21.05/nixexprs.tar.xz
+    The Nix search path can also be extended using the `-I` option to
+    many Nix commands, which takes precedence over `NIX_PATH`.
-  - `NIX_IGNORE_SYMLINK_STORE`
+  - `NIX_IGNORE_SYMLINK_STORE`\
     Normally, the Nix store directory (typically `/nix/store`) is not
     allowed to contain any symlink components. This is to prevent
     “impure” builds. Builders sometimes “canonicalise” paths by
@@ -50,7 +54,7 @@ Most Nix commands interpret the following environment variables:
     builds are deployed to machines where `/nix/store` resolves
     differently. If you are sure that you're not going to do that, you
     can set `NIX_IGNORE_SYMLINK_STORE` to `1`.
     Note that if you're symlinking the Nix store so that you can put it
     on another file system than the root file system, on Linux you're
     better off using `bind` mount points, e.g.,
@@ -59,44 +63,44 @@ Most Nix commands interpret the following environment variables:
     $ mkdir /nix
     $ mount -o bind /mnt/otherdisk/nix /nix
     ```
     Consult the mount 8 manual page for details.
-  - `NIX_STORE_DIR`
+  - `NIX_STORE_DIR`\
     Overrides the location of the Nix store (default `prefix/store`).
-  - `NIX_DATA_DIR`
+  - `NIX_DATA_DIR`\
     Overrides the location of the Nix static data directory (default
     `prefix/share`).
-  - `NIX_LOG_DIR`
+  - `NIX_LOG_DIR`\
     Overrides the location of the Nix log directory (default
     `prefix/var/log/nix`).
-  - `NIX_STATE_DIR`
+  - `NIX_STATE_DIR`\
     Overrides the location of the Nix state directory (default
     `prefix/var/nix`).
-  - `NIX_CONF_DIR`
+  - `NIX_CONF_DIR`\
     Overrides the location of the system Nix configuration directory
     (default `prefix/etc/nix`).
-  - `NIX_CONFIG`
+  - `NIX_CONFIG`\
     Applies settings from Nix configuration from the environment.
     The content is treated as if it was read from a Nix configuration file.
     Settings are separated by the newline character.
-  - `NIX_USER_CONF_FILES`
+  - `NIX_USER_CONF_FILES`\
     Overrides the location of the user Nix configuration files to load
     from (defaults to the XDG spec locations). The variable is treated
     as a list separated by the `:` token.
-  - `TMPDIR`
+  - `TMPDIR`\
     Use the specified directory to store temporary files. In particular,
     this includes temporary build directories; these can take up
     substantial amounts of disk space. The default is `/tmp`.
-  - `NIX_REMOTE`
+  - `NIX_REMOTE`\
     This variable should be set to `daemon` if you want to use the Nix
     daemon to execute Nix operations. This is necessary in [multi-user
     Nix installations](../installation/multi-user.md). If the Nix
@@ -104,16 +108,16 @@ Most Nix commands interpret the following environment variables:
     should be set to `unix://path/to/socket`. Otherwise, it should be
     left unset.
-  - `NIX_SHOW_STATS`
+  - `NIX_SHOW_STATS`\
     If set to `1`, Nix will print some evaluation statistics, such as
     the number of values allocated.
-  - `NIX_COUNT_CALLS`
+  - `NIX_COUNT_CALLS`\
     If set to `1`, Nix will print how often functions were called during
     Nix expression evaluation. This is useful for profiling your Nix
     expressions.
-  - `GC_INITIAL_HEAP_SIZE`
+  - `GC_INITIAL_HEAP_SIZE`\
     If Nix has been configured to use the Boehm garbage collector, this
     variable sets the initial size of the heap in bytes. It defaults to
     384 MiB. Setting it to a low value reduces memory consumption, but
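
A small usage example of the `channel:` shorthand documented above (the evaluation command is the same one used elsewhere in this manual):

```bash
# The two NIX_PATH values below are equivalent per the text above.
export NIX_PATH=nixpkgs=channel:nixos-21.05
nix-instantiate --eval -E '(import <nixpkgs> {}).lib.version'
```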


@@ -47,16 +47,16 @@ All options not listed here are passed to `nix-store
 --realise`, except for `--arg` and `--attr` / `-A` which are passed to
 `nix-instantiate`.
-  - `--no-out-link`
+  - `--no-out-link`\
     Do not create a symlink to the output path. Note that as a result
     the output does not become a root of the garbage collector, and so
     might be deleted by `nix-store
     --gc`.
-  - `--dry-run`
+  - `--dry-run`\
     Show what store paths would be built or downloaded.
-  - `--out-link` / `-o` *outlink*
+  - `--out-link` / `-o` *outlink*\
     Change the name of the symlink to the output path created from
     `result` to *outlink*.
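
For example, the two symlink-related flags above can be exercised like this (the expression file name is a placeholder):

```bash
nix-build --no-out-link release.nix    # build, but do not create ./result
nix-build -o my-result release.nix     # create ./my-result instead of ./result
```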


@@ -17,26 +17,26 @@ To see the list of official NixOS channels, visit
 This command has the following operations:
-  - `--add` *url* \[*name*\]
+  - `--add` *url* \[*name*\]\
     Adds a channel named *name* with URL *url* to the list of subscribed
     channels. If *name* is omitted, it defaults to the last component of
     *url*, with the suffixes `-stable` or `-unstable` removed.
-  - `--remove` *name*
+  - `--remove` *name*\
     Removes the channel named *name* from the list of subscribed
     channels.
-  - `--list`
+  - `--list`\
     Prints the names and URLs of all subscribed channels on standard
     output.
-  - `--update` \[*names*…\]
+  - `--update` \[*names*…\]\
     Downloads the Nix expressions of all subscribed channels (or only
     those included in *names* if specified) and makes them the default
     for `nix-env` operations (by symlinking them from the directory
     `~/.nix-defexpr`).
-  - `--rollback` \[*generation*\]
+  - `--rollback` \[*generation*\]\
     Reverts the previous call to `nix-channel
     --update`. Optionally, you can specify a specific channel generation
     number to restore.
@@ -70,14 +70,14 @@ $ nix-instantiate --eval -E '(import <nixpkgs> {}).lib.version'
 # Files
-  - `/nix/var/nix/profiles/per-user/username/channels`
+  - `/nix/var/nix/profiles/per-user/username/channels`\
     `nix-channel` uses a `nix-env` profile to keep track of previous
     versions of the subscribed channels. Every time you run `nix-channel
     --update`, a new channel generation (that is, a symlink to the
     channel Nix expressions in the Nix store) is created. This enables
     `nix-channel --rollback` to revert to previous versions.
-  - `~/.nix-defexpr/channels`
+  - `~/.nix-defexpr/channels`\
     This is a symlink to
     `/nix/var/nix/profiles/per-user/username/channels`. It ensures that
     `nix-env` can find your channels. In a multi-user installation, you
@@ -89,7 +89,7 @@ $ nix-instantiate --eval -E '(import <nixpkgs> {}).lib.version'
 A channel URL should point to a directory containing the following
 files:
-  - `nixexprs.tar.xz`
+  - `nixexprs.tar.xz`\
     A tarball containing Nix expressions and files referenced by them
     (such as build scripts and patches). At the top level, the tarball
     should contain a single directory. That directory must contain a
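
Putting the operations above together, a typical subscription workflow might look like this (channel URL and name are illustrative):

```bash
nix-channel --add https://nixos.org/channels/nixpkgs-unstable nixpkgs
nix-channel --update            # fetch and activate the channel expressions
nix-channel --list              # show subscribed channels
nix-channel --rollback          # revert the last --update if needed
```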


@@ -35,21 +35,21 @@ and second to send the dump of those paths. If this bothers you, use
 # Options
-  - `--to`
+  - `--to`\
     Copy the closure of _paths_ from the local Nix store to the Nix
     store on _machine_. This is the default.
-  - `--from`
+  - `--from`\
     Copy the closure of _paths_ from the Nix store on _machine_ to the
     local Nix store.
-  - `--gzip`
+  - `--gzip`\
     Enable compression of the SSH connection.
-  - `--include-outputs`
+  - `--include-outputs`\
     Also copy the outputs of store derivations included in the closure.
-  - `--use-substitutes` / `-s`
+  - `--use-substitutes` / `-s`\
     Attempt to download missing paths on the target machine using Nix's
     substitute mechanism. Any paths that cannot be substituted on the
     target are still copied normally from the source. This is useful,
@@ -58,12 +58,12 @@ and second to send the dump of those paths. If this bothers you, use
     `nixos.org` (the default binary cache server) is
     fast.
-  - `-v`
+  - `-v`\
     Show verbose output.
 # Environment variables
-  - `NIX_SSHOPTS`
+  - `NIX_SSHOPTS`\
     Additional options to be passed to `ssh` on the command
     line.
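
A combined example of the options and the `NIX_SSHOPTS` variable described above (host name and store path are placeholders):

```bash
# Copy a closure to a remote machine over a non-default SSH port, letting the
# target substitute what it can from its configured binary caches.
NIX_SSHOPTS="-p 2222" nix-copy-closure --to -s alice@example.org /nix/store/<hash>-somepkg
```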


@ -36,27 +36,27 @@ case-sensitive. The regular expression can optionally be followed by a
dash and a version number; if omitted, any version of the package will dash and a version number; if omitted, any version of the package will
match. Here are some examples: match. Here are some examples:
- `firefox` - `firefox`\
Matches the package name `firefox` and any version. Matches the package name `firefox` and any version.
- `firefox-32.0` - `firefox-32.0`\
Matches the package name `firefox` and version `32.0`. Matches the package name `firefox` and version `32.0`.
- `gtk\\+` - `gtk\\+`\
Matches the package name `gtk+`. The `+` character must be escaped Matches the package name `gtk+`. The `+` character must be escaped
using a backslash to prevent it from being interpreted as a using a backslash to prevent it from being interpreted as a
quantifier, and the backslash must be escaped in turn with another quantifier, and the backslash must be escaped in turn with another
backslash to ensure that the shell passes it on. backslash to ensure that the shell passes it on.
- `.\*` - `.\*`\
Matches any package name. This is the default for most commands. Matches any package name. This is the default for most commands.
- `'.*zip.*'` - `'.*zip.*'`\
Matches any package name containing the string `zip`. Note the dots: Matches any package name containing the string `zip`. Note the dots:
`'*zip*'` does not work, because in a regular expression, the `'*zip*'` does not work, because in a regular expression, the
character `*` is interpreted as a quantifier. character `*` is interpreted as a quantifier.
- `'.*(firefox|chromium).*'` - `'.*(firefox|chromium).*'`\
Matches any package name containing the strings `firefox` or Matches any package name containing the strings `firefox` or
`chromium`. `chromium`.
@ -66,7 +66,7 @@ This section lists the options that are common to all operations. These
options are allowed for every subcommand, though they may not always options are allowed for every subcommand, though they may not always
have an effect. have an effect.
- `--file` / `-f` *path* - `--file` / `-f` *path*\
Specifies the Nix expression (designated below as the *active Nix Specifies the Nix expression (designated below as the *active Nix
expression*) used by the `--install`, `--upgrade`, and `--query expression*) used by the `--install`, `--upgrade`, and `--query
--available` operations to obtain derivations. The default is --available` operations to obtain derivations. The default is
@ -77,13 +77,13 @@ have an effect.
unpacked to a temporary location. The tarball must include a single unpacked to a temporary location. The tarball must include a single
top-level directory containing at least a file named `default.nix`. top-level directory containing at least a file named `default.nix`.
- `--profile` / `-p` *path* - `--profile` / `-p` *path*\
Specifies the profile to be used by those operations that operate on Specifies the profile to be used by those operations that operate on
a profile (designated below as the *active profile*). A profile is a a profile (designated below as the *active profile*). A profile is a
sequence of user environments called *generations*, one of which is sequence of user environments called *generations*, one of which is
the *current generation*. the *current generation*.
- `--dry-run` - `--dry-run`\
For the `--install`, `--upgrade`, `--uninstall`, For the `--install`, `--upgrade`, `--uninstall`,
`--switch-generation`, `--delete-generations` and `--rollback` `--switch-generation`, `--delete-generations` and `--rollback`
operations, this flag will cause `nix-env` to print what *would* be operations, this flag will cause `nix-env` to print what *would* be
@ -93,7 +93,7 @@ have an effect.
[substituted](../glossary.md) (i.e., downloaded) and which paths [substituted](../glossary.md) (i.e., downloaded) and which paths
will be built from source (because no substitute is available). will be built from source (because no substitute is available).
- `--system-filter` *system* - `--system-filter` *system*\
By default, operations such as `--query By default, operations such as `--query
--available` show derivations matching any platform. This option --available` show derivations matching any platform. This option
allows you to use derivations for the specified platform *system*. allows you to use derivations for the specified platform *system*.
@ -102,7 +102,7 @@ have an effect.
# Files # Files
- `~/.nix-defexpr` - `~/.nix-defexpr`\
The source for the default Nix expressions used by the The source for the default Nix expressions used by the
`--install`, `--upgrade`, and `--query --available` operations to `--install`, `--upgrade`, and `--query --available` operations to
obtain derivations. The `--file` option may be used to override obtain derivations. The `--file` option may be used to override
@ -140,7 +140,7 @@ have an effect.
The command `nix-channel` places symlinks to the downloaded Nix The command `nix-channel` places symlinks to the downloaded Nix
expressions from each subscribed channel in this directory. expressions from each subscribed channel in this directory.
- `~/.nix-profile` - `~/.nix-profile`\
A symbolic link to the user's current profile. By default, this A symbolic link to the user's current profile. By default, this
symlink points to `prefix/var/nix/profiles/default`. The `PATH` symlink points to `prefix/var/nix/profiles/default`. The `PATH`
environment variable should include `~/.nix-profile/bin` for the environment variable should include `~/.nix-profile/bin` for the
@ -217,13 +217,13 @@ a number of possible ways:
## Flags ## Flags
- `--prebuilt-only` / `-b` - `--prebuilt-only` / `-b`\
Use only derivations for which a substitute is registered, i.e., Use only derivations for which a substitute is registered, i.e.,
there is a pre-built binary available that can be downloaded in lieu there is a pre-built binary available that can be downloaded in lieu
of building the derivation. Thus, no packages will be built from of building the derivation. Thus, no packages will be built from
source. source.
- `--preserve-installed`; `-P` - `--preserve-installed`; `-P`\
Do not remove derivations with a name matching one of the Do not remove derivations with a name matching one of the
derivations being installed. Usually, trying to have two versions of derivations being installed. Usually, trying to have two versions of
the same package installed in the same generation of a profile will the same package installed in the same generation of a profile will
@ -231,7 +231,7 @@ a number of possible ways:
clashes between the two versions. However, this is not the case for clashes between the two versions. However, this is not the case for
all packages. all packages.
- `--remove-all`; `-r` - `--remove-all`; `-r`\
Remove all previously installed packages first. This is equivalent Remove all previously installed packages first. This is equivalent
to running `nix-env -e '.*'` first, except that everything happens to running `nix-env -e '.*'` first, except that everything happens
in a single transaction. in a single transaction.
@ -346,24 +346,24 @@ version is installed.
## Flags ## Flags
- `--lt` - `--lt`\
Only upgrade a derivation to newer versions. This is the default. Only upgrade a derivation to newer versions. This is the default.
- `--leq` - `--leq`\
In addition to upgrading to newer versions, also “upgrade” to In addition to upgrading to newer versions, also “upgrade” to
derivations that have the same version. Version are not a unique derivations that have the same version. Version are not a unique
identification of a derivation, so there may be many derivations identification of a derivation, so there may be many derivations
that have the same version. This flag may be useful to force that have the same version. This flag may be useful to force
“synchronisation” between the installed and available derivations. “synchronisation” between the installed and available derivations.
- `--eq` - `--eq`\
*Only* “upgrade” to derivations that have the same version. This may *Only* “upgrade” to derivations that have the same version. This may
not seem very useful, but it actually is, e.g., when there is a new not seem very useful, but it actually is, e.g., when there is a new
release of Nixpkgs and you want to replace installed applications release of Nixpkgs and you want to replace installed applications
with the same versions built against newer dependencies (to reduce with the same versions built against newer dependencies (to reduce
the number of dependencies floating around on your system). the number of dependencies floating around on your system).
- `--always` - `--always`\
In addition to upgrading to newer versions, also “upgrade” to In addition to upgrading to newer versions, also “upgrade” to
derivations that have the same or a lower version. I.e., derivations derivations that have the same or a lower version. I.e., derivations
may actually be downgraded depending on what is available in the may actually be downgraded depending on what is available in the
@ -578,11 +578,11 @@ The derivations are sorted by their `name` attributes.
The following flags specify the set of things on which the query The following flags specify the set of things on which the query
operates. operates.
- `--installed` - `--installed`\
The query operates on the store paths that are installed in the The query operates on the store paths that are installed in the
current generation of the active profile. This is the default. current generation of the active profile. This is the default.
- `--available`; `-a` - `--available`; `-a`\
The query operates on the derivations that are available in the The query operates on the derivations that are available in the
active Nix expression. active Nix expression.
@ -593,24 +593,24 @@ selected derivations. Multiple flags may be specified, in which case the
information is shown in the order given here. Note that the name of the information is shown in the order given here. Note that the name of the
derivation is shown unless `--no-name` is specified. derivation is shown unless `--no-name` is specified.
- `--xml` - `--xml`\
Print the result in an XML representation suitable for automatic Print the result in an XML representation suitable for automatic
processing by other tools. The root element is called `items`, which processing by other tools. The root element is called `items`, which
contains an `item` element for each available or installed contains an `item` element for each available or installed
derivation. The fields discussed below are all stored in attributes derivation. The fields discussed below are all stored in attributes
of the `item` elements. of the `item` elements.
- `--json` - `--json`\
Print the result in a JSON representation suitable for automatic Print the result in a JSON representation suitable for automatic
processing by other tools. processing by other tools.
- `--prebuilt-only` / `-b` - `--prebuilt-only` / `-b`\
Show only derivations for which a substitute is registered, i.e., Show only derivations for which a substitute is registered, i.e.,
there is a pre-built binary available that can be downloaded in lieu there is a pre-built binary available that can be downloaded in lieu
of building the derivation. Thus, this shows all packages that of building the derivation. Thus, this shows all packages that
probably can be installed quickly. probably can be installed quickly.
- `--status`; `-s` - `--status`; `-s`\
Print the *status* of the derivation. The status consists of three Print the *status* of the derivation. The status consists of three
characters. The first is `I` or `-`, indicating whether the characters. The first is `I` or `-`, indicating whether the
derivation is currently installed in the current generation of the derivation is currently installed in the current generation of the
@ -621,49 +621,49 @@ derivation is shown unless `--no-name` is specified.
derivation to be built. The third is `S` or `-`, indicating whether derivation to be built. The third is `S` or `-`, indicating whether
a substitute is available for the derivation. a substitute is available for the derivation.
- `--attr-path`; `-P` - `--attr-path`; `-P`\
Print the *attribute path* of the derivation, which can be used to Print the *attribute path* of the derivation, which can be used to
unambiguously select it using the `--attr` option available in unambiguously select it using the `--attr` option available in
commands that install derivations like `nix-env --install`. This commands that install derivations like `nix-env --install`. This
option only works together with `--available`. option only works together with `--available`.
- `--no-name` - `--no-name`\
Suppress printing of the `name` attribute of each derivation. Suppress printing of the `name` attribute of each derivation.
- `--compare-versions` / `-c` - `--compare-versions` / `-c`\
Compare installed versions to available versions, or vice versa (if Compare installed versions to available versions, or vice versa (if
`--available` is given). This is useful for quickly seeing whether `--available` is given). This is useful for quickly seeing whether
upgrades for installed packages are available in a Nix expression. A upgrades for installed packages are available in a Nix expression. A
column is added with the following meaning: column is added with the following meaning:
- `<` *version* - `<` *version*\
A newer version of the package is available or installed. A newer version of the package is available or installed.
- `=` *version* - `=` *version*\
At most the same version of the package is available or At most the same version of the package is available or
installed. installed.
- `>` *version* - `>` *version*\
Only older versions of the package are available or installed. Only older versions of the package are available or installed.
- `- ?` - `- ?`\
No version of the package is available or installed. No version of the package is available or installed.
- `--system` - `--system`\
Print the `system` attribute of the derivation. Print the `system` attribute of the derivation.
- `--drv-path` - `--drv-path`\
Print the path of the store derivation. Print the path of the store derivation.
- `--out-path` - `--out-path`\
Print the output path of the derivation. Print the output path of the derivation.
- `--description` - `--description`\
Print a short (one-line) description of the derivation, if Print a short (one-line) description of the derivation, if
available. The description is taken from the `meta.description` available. The description is taken from the `meta.description`
attribute of the derivation. attribute of the derivation.
- `--meta` - `--meta`\
Print all of the meta-attributes of the derivation. This option is Print all of the meta-attributes of the derivation. This option is
only available with `--xml` or `--json`. only available with `--xml` or `--json`.
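  A hedged sketch of combining these display flags (`firefox` is only an example pattern):

  ```console
  # Attribute paths and one-line descriptions of matching available packages:
  $ nix-env -qaP --description firefox
  # Status characters, and machine-readable JSON output:
  $ nix-env -qas firefox
  $ nix-env -qa --json firefox
  # Compare installed packages with the versions available in the Nix expression:
  $ nix-env -qc
  ```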
@ -874,7 +874,7 @@ error: no generation older than the current (91) exists
# Environment variables # Environment variables
- `NIX_PROFILE` - `NIX_PROFILE`\
Location of the Nix profile. Defaults to the target of the symlink Location of the Nix profile. Defaults to the target of the symlink
`~/.nix-profile`, if it exists, or `/nix/var/nix/profiles/default` `~/.nix-profile`, if it exists, or `/nix/var/nix/profiles/default`
otherwise. otherwise.
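  For example, to run a query against a different profile (the path below is purely illustrative):

  ```console
  $ NIX_PROFILE=/nix/var/nix/profiles/per-user/alice/profile nix-env -q
  ```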
View file
@ -29,29 +29,29 @@ md5sum`.
# Options # Options
- `--flat` - `--flat`\
Print the cryptographic hash of the contents of each regular file Print the cryptographic hash of the contents of each regular file
*path*. That is, do not compute the hash over the dump of *path*. *path*. That is, do not compute the hash over the dump of *path*.
The result is identical to that produced by the GNU commands The result is identical to that produced by the GNU commands
`md5sum` and `sha1sum`. `md5sum` and `sha1sum`.
- `--base32` - `--base32`\
Print the hash in a base-32 representation rather than hexadecimal. Print the hash in a base-32 representation rather than hexadecimal.
This base-32 representation is more compact and can be used in Nix This base-32 representation is more compact and can be used in Nix
expressions (such as in calls to `fetchurl`). expressions (such as in calls to `fetchurl`).
- `--truncate` - `--truncate`\
Truncate hashes longer than 160 bits (such as SHA-256) to 160 bits. Truncate hashes longer than 160 bits (such as SHA-256) to 160 bits.
- `--type` *hashAlgo* - `--type` *hashAlgo*\
Use the specified cryptographic hash algorithm, which can be one of Use the specified cryptographic hash algorithm, which can be one of
`md5`, `sha1`, `sha256`, and `sha512`. `md5`, `sha1`, `sha256`, and `sha512`.
- `--to-base16` - `--to-base16`\
Don’t hash anything, but convert the base-32 hash representation Don’t hash anything, but convert the base-32 hash representation
*hash* to hexadecimal. *hash* to hexadecimal.
- `--to-base32` - `--to-base32`\
Don’t hash anything, but convert the hexadecimal hash representation Don’t hash anything, but convert the hexadecimal hash representation
*hash* to base-32. *hash* to base-32.
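  A few illustrative invocations of the options above (`example.txt`, `some-directory` and `$somehash` are placeholders):

  ```console
  # Hash a single regular file, giving the same result as sha256sum:
  $ nix-hash --flat --type sha256 example.txt
  # Hash the dump of a directory and print it in base-32:
  $ nix-hash --type sha256 --base32 some-directory
  # Convert an existing base-32 hash to hexadecimal:
  $ nix-hash --type sha256 --to-base16 "$somehash"
  ```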
View file
@ -29,26 +29,26 @@ standard input.
# Options # Options
- `--add-root` *path* - `--add-root` *path*\
See the [corresponding option](nix-store.md) in `nix-store`. See the [corresponding option](nix-store.md) in `nix-store`.
- `--parse` - `--parse`\
Just parse the input files, and print their abstract syntax trees on Just parse the input files, and print their abstract syntax trees on
standard output in ATerm format. standard output in ATerm format.
- `--eval` - `--eval`\
Just parse and evaluate the input files, and print the resulting Just parse and evaluate the input files, and print the resulting
values on standard output. No instantiation of store derivations values on standard output. No instantiation of store derivations
takes place. takes place.
- `--find-file` - `--find-file`\
Look up the given files in Nix’s search path (as specified by the Look up the given files in Nix’s search path (as specified by the
`NIX_PATH` environment variable). If found, print the corresponding `NIX_PATH` environment variable). If found, print the corresponding
absolute paths on standard output. For instance, if `NIX_PATH` is absolute paths on standard output. For instance, if `NIX_PATH` is
`nixpkgs=/home/alice/nixpkgs`, then `nix-instantiate --find-file `nixpkgs=/home/alice/nixpkgs`, then `nix-instantiate --find-file
nixpkgs/default.nix` will print `/home/alice/nixpkgs/default.nix`. nixpkgs/default.nix` will print `/home/alice/nixpkgs/default.nix`.
- `--strict` - `--strict`\
When used with `--eval`, recursively evaluate list elements and When used with `--eval`, recursively evaluate list elements and
attributes. Normally, such sub-expressions are left unevaluated attributes. Normally, such sub-expressions are left unevaluated
(since the Nix expression language is lazy). (since the Nix expression language is lazy).
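  A small sketch of `--eval`, with and without `--strict`:

  ```console
  # Evaluate an expression; no store derivations are instantiated:
  $ nix-instantiate --eval -E '1 + 1'
  2
  # With --strict, nested attributes and list elements are evaluated as well,
  # instead of being left as unevaluated thunks:
  $ nix-instantiate --eval --strict -E '{ x = { y = 1 + 1; }; }'
  ```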
@ -58,17 +58,17 @@ standard input.
> This option can cause non-termination, because lazy data > This option can cause non-termination, because lazy data
> structures can be infinitely large. > structures can be infinitely large.
- `--json` - `--json`\
When used with `--eval`, print the resulting value as a JSON When used with `--eval`, print the resulting value as a JSON
representation of the abstract syntax tree rather than as an ATerm. representation of the abstract syntax tree rather than as an ATerm.
- `--xml` - `--xml`\
When used with `--eval`, print the resulting value as an XML When used with `--eval`, print the resulting value as an XML
representation of the abstract syntax tree rather than as an ATerm. representation of the abstract syntax tree rather than as an ATerm.
The schema is the same as that used by the [`toXML` The schema is the same as that used by the [`toXML`
built-in](../expressions/builtins.md). built-in](../expressions/builtins.md).
- `--read-write-mode` - `--read-write-mode`\
When used with `--eval`, perform evaluation in read/write mode so When used with `--eval`, perform evaluation in read/write mode so
Nix language features that require it will still work (at the cost Nix language features that require it will still work (at the cost
of needing to do instantiation of every evaluated derivation). If of needing to do instantiation of every evaluated derivation). If
View file
@ -37,22 +37,22 @@ Nix store is also printed.
# Options # Options
- `--type` *hashAlgo* - `--type` *hashAlgo*\
Use the specified cryptographic hash algorithm, which can be one of Use the specified cryptographic hash algorithm, which can be one of
`md5`, `sha1`, `sha256`, and `sha512`. `md5`, `sha1`, `sha256`, and `sha512`.
- `--print-path` - `--print-path`\
Print the store path of the downloaded file on standard output. Print the store path of the downloaded file on standard output.
- `--unpack` - `--unpack`\
Unpack the archive (which must be a tarball or zip file) and add the Unpack the archive (which must be a tarball or zip file) and add the
result to the Nix store. The resulting hash can be used with result to the Nix store. The resulting hash can be used with
functions such as Nixpkgs’s `fetchzip` or `fetchFromGitHub`. functions such as Nixpkgs’s `fetchzip` or `fetchFromGitHub`.
- `--executable` - `--executable`\
Set the executable bit on the downloaded file. Set the executable bit on the downloaded file.
- `--name` *name* - `--name` *name*\
Override the name of the file in the Nix store. By default, this is Override the name of the file in the Nix store. By default, this is
`hash-basename`, where *basename* is the last component of *url*. `hash-basename`, where *basename* is the last component of *url*.
Overriding the name is necessary when *basename* contains characters Overriding the name is necessary when *basename* contains characters
View file
@ -54,7 +54,7 @@ All options not listed here are passed to `nix-store
--realise`, except for `--arg` and `--attr` / `-A` which are passed to --realise`, except for `--arg` and `--attr` / `-A` which are passed to
`nix-instantiate`. `nix-instantiate`.
- `--command` *cmd* - `--command` *cmd*\
In the environment of the derivation, run the shell command *cmd*. In the environment of the derivation, run the shell command *cmd*.
This command is executed in an interactive shell. (Use `--run` to This command is executed in an interactive shell. (Use `--run` to
use a non-interactive shell instead.) However, a call to `exit` is use a non-interactive shell instead.) However, a call to `exit` is
@ -64,36 +64,34 @@ All options not listed here are passed to `nix-store
drop you into the interactive shell. This can be useful for doing drop you into the interactive shell. This can be useful for doing
any additional initialisation. any additional initialisation.
- `--run` *cmd* - `--run` *cmd*\
Like `--command`, but executes the command in a non-interactive Like `--command`, but executes the command in a non-interactive
shell. This means (among other things) that if you hit Ctrl-C while shell. This means (among other things) that if you hit Ctrl-C while
the command is running, the shell exits. the command is running, the shell exits.
- `--exclude` *regexp* - `--exclude` *regexp*\
Do not build any dependencies whose store path matches the regular Do not build any dependencies whose store path matches the regular
expression *regexp*. This option may be specified multiple times. expression *regexp*. This option may be specified multiple times.
- `--pure` - `--pure`\
If this flag is specified, the environment is almost entirely If this flag is specified, the environment is almost entirely
cleared before the interactive shell is started, so you get an cleared before the interactive shell is started, so you get an
environment that more closely corresponds to the “real” Nix build. A environment that more closely corresponds to the “real” Nix build. A
few variables, in particular `HOME`, `USER` and `DISPLAY`, are few variables, in particular `HOME`, `USER` and `DISPLAY`, are
retained. Note that (depending on your Bash retained.
installation) `/etc/bashrc` is still sourced, so any variables set
there will affect the interactive shell.
- `--packages` / `-p` *packages* - `--packages` / `-p` *packages*…\
Set up an environment in which the specified packages are present. Set up an environment in which the specified packages are present.
The command line arguments are interpreted as attribute names inside The command line arguments are interpreted as attribute names inside
the Nix Packages collection. Thus, `nix-shell -p libjpeg openjdk` the Nix Packages collection. Thus, `nix-shell -p libjpeg openjdk`
will start a shell in which the packages denoted by the attribute will start a shell in which the packages denoted by the attribute
names `libjpeg` and `openjdk` are present. names `libjpeg` and `openjdk` are present.
- `-i` *interpreter* - `-i` *interpreter*\
The chained script interpreter to be invoked by `nix-shell`. Only The chained script interpreter to be invoked by `nix-shell`. Only
applicable in `#!`-scripts (described below). applicable in `#!`-scripts (described below).
- `--keep` *name* - `--keep` *name*\
When a `--pure` shell is started, keep the listed environment When a `--pure` shell is started, keep the listed environment
variables. variables.
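  A couple of hedged examples of the flags above (`MY_VAR` is a hypothetical variable name):

  ```console
  # Interactive ad-hoc environment with two packages from Nixpkgs:
  $ nix-shell -p libjpeg openjdk
  # Run one command in a mostly clean environment, keeping one extra variable:
  $ nix-shell --pure --keep MY_VAR -p hello --run hello
  ```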
@ -101,7 +99,7 @@ The following common options are supported:
# Environment variables # Environment variables
- `NIX_BUILD_SHELL` - `NIX_BUILD_SHELL`\
Shell used to start the interactive environment. Defaults to the Shell used to start the interactive environment. Defaults to the
`bash` found in `PATH`. `bash` found in `PATH`.
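  For example, assuming a Bash-compatible shell is wanted (this is only a sketch):

  ```console
  $ NIX_BUILD_SHELL=$(command -v bash) nix-shell -p hello
  ```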
View file
@ -22,7 +22,7 @@ This section lists the options that are common to all operations. These
options are allowed for every subcommand, though they may not always options are allowed for every subcommand, though they may not always
have an effect. have an effect.
- `--add-root` *path* - `--add-root` *path*\
Causes the result of a realisation (`--realise` and Causes the result of a realisation (`--realise` and
`--force-realise`) to be registered as a root of the garbage `--force-realise`) to be registered as a root of the garbage
collector. *path* will be created as a symlink to the resulting collector. *path* will be created as a symlink to the resulting
@ -86,15 +86,15 @@ non-derivations argument, the argument itself is printed.)
The following flags are available: The following flags are available:
- `--dry-run` - `--dry-run`\
Print on standard error a description of what packages would be Print on standard error a description of what packages would be
built or downloaded, without actually performing the operation. built or downloaded, without actually performing the operation.
- `--ignore-unknown` - `--ignore-unknown`\
If a non-derivation path does not have a substitute, then silently If a non-derivation path does not have a substitute, then silently
ignore it. ignore it.
- `--check` - `--check`\
This option allows you to check whether a derivation is This option allows you to check whether a derivation is
deterministic. It rebuilds the specified derivation and checks deterministic. It rebuilds the specified derivation and checks
whether the result is bitwise-identical with the existing outputs, whether the result is bitwise-identical with the existing outputs,
@ -110,20 +110,20 @@ The following flags are available:
Special exit codes: Special exit codes:
- `100` - `100`\
Generic build failure, the builder process returned with a non-zero Generic build failure, the builder process returned with a non-zero
exit code. exit code.
- `101` - `101`\
Build timeout, the build was aborted because it did not complete Build timeout, the build was aborted because it did not complete
within the specified `timeout`. within the specified `timeout`.
- `102` - `102`\
Hash mismatch, the build output was rejected because it does not Hash mismatch, the build output was rejected because it does not
match the [`outputHash` attribute of the match the [`outputHash` attribute of the
derivation](../expressions/advanced-attributes.md). derivation](../expressions/advanced-attributes.md).
- `104` - `104`\
Not deterministic, the build succeeded in check mode but the Not deterministic, the build succeeded in check mode but the
resulting output is not binary reproducible. resulting output is not binary reproducible.
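A minimal sketch of reacting to these codes in a script (`$drv` is a placeholder for a store derivation path):

```console
$ nix-store --realise "$drv" || echo "build failed with exit code $?"
```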
@ -170,7 +170,7 @@ access to a restricted ssh user.
The following flags are available: The following flags are available:
- `--write` - `--write`\
Allow the connected client to request the realization of Allow the connected client to request the realization of
derivations. In effect, this can be used to make the host act as a derivations. In effect, this can be used to make the host act as a
remote builder. remote builder.
@ -200,18 +200,18 @@ reachable via file system references from a set of “roots”, are deleted.
The following suboperations may be specified: The following suboperations may be specified:
- `--print-roots` - `--print-roots`\
This operation prints on standard output the set of roots used by This operation prints on standard output the set of roots used by
the garbage collector. the garbage collector.
- `--print-live` - `--print-live`\
This operation prints on standard output the set of “live” store This operation prints on standard output the set of “live” store
paths, which are all the store paths reachable from the roots. Live paths, which are all the store paths reachable from the roots. Live
paths should never be deleted, since that would break consistency — paths should never be deleted, since that would break consistency —
it would become possible that applications are installed that it would become possible that applications are installed that
reference things that are no longer present in the store. reference things that are no longer present in the store.
- `--print-dead` - `--print-dead`\
This operation prints out on standard output the set of “dead” store This operation prints out on standard output the set of “dead” store
paths, which is just the opposite of the set of live paths: any path paths, which is just the opposite of the set of live paths: any path
in the store that is not live (with respect to the roots) is dead. in the store that is not live (with respect to the roots) is dead.
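For example, none of these suboperations delete anything:

```console
# Roots the garbage collector will respect:
$ nix-store --gc --print-roots
# Store paths that are live, respectively dead, with regard to those roots:
$ nix-store --gc --print-live
$ nix-store --gc --print-dead
```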
@ -219,7 +219,7 @@ The following suboperations may be specified:
By default, all unreachable paths are deleted. The following options By default, all unreachable paths are deleted. The following options
control what gets deleted and in what order: control what gets deleted and in what order:
- `--max-freed` *bytes* - `--max-freed` *bytes*\
Keep deleting paths until at least *bytes* bytes have been deleted, Keep deleting paths until at least *bytes* bytes have been deleted,
then stop. The argument *bytes* can be followed by the then stop. The argument *bytes* can be followed by the
multiplicative suffix `K`, `M`, `G` or `T`, denoting KiB, MiB, GiB multiplicative suffix `K`, `M`, `G` or `T`, denoting KiB, MiB, GiB
@ -300,22 +300,22 @@ symlink.
## Common query options ## Common query options
- `--use-output`; `-u` - `--use-output`; `-u`\
For each argument to the query that is a store derivation, apply the For each argument to the query that is a store derivation, apply the
query to the output path of the derivation instead. query to the output path of the derivation instead.
- `--force-realise`; `-f` - `--force-realise`; `-f`\
Realise each argument to the query first (see [`nix-store Realise each argument to the query first (see [`nix-store
--realise`](#operation---realise)). --realise`](#operation---realise)).
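For instance, a query can be redirected from a store derivation to its output path (`$drv` is a placeholder for a `.drv` path; `--references` is described below):

```console
$ nix-store --query --use-output --references "$drv"
```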
## Queries ## Queries
- `--outputs` - `--outputs`\
Prints out the [output paths](../glossary.md) of the store Prints out the [output paths](../glossary.md) of the store
derivations *paths*. These are the paths that will be produced when derivations *paths*. These are the paths that will be produced when
the derivation is built. the derivation is built.
- `--requisites`; `-R` - `--requisites`; `-R`\
Prints out the [closure](../glossary.md) of the store path *paths*. Prints out the [closure](../glossary.md) of the store path *paths*.
This query has one option: This query has one option:
@ -332,31 +332,31 @@ symlink.
dependencies) is obtained by distributing the closure of a store dependencies) is obtained by distributing the closure of a store
derivation and specifying the option `--include-outputs`. derivation and specifying the option `--include-outputs`.
- `--references` - `--references`\
Prints the set of [references](../glossary.md) of the store paths Prints the set of [references](../glossary.md) of the store paths
*paths*, that is, their immediate dependencies. (For *all* *paths*, that is, their immediate dependencies. (For *all*
dependencies, use `--requisites`.) dependencies, use `--requisites`.)
- `--referrers` - `--referrers`\
Prints the set of *referrers* of the store paths *paths*, that is, Prints the set of *referrers* of the store paths *paths*, that is,
the store paths currently existing in the Nix store that refer to the store paths currently existing in the Nix store that refer to
one of *paths*. Note that contrary to the references, the set of one of *paths*. Note that contrary to the references, the set of
referrers is not constant; it can change as store paths are added or referrers is not constant; it can change as store paths are added or
removed. removed.
- `--referrers-closure` - `--referrers-closure`\
Prints the closure of the set of store paths *paths* under the Prints the closure of the set of store paths *paths* under the
referrers relation; that is, all store paths that directly or referrers relation; that is, all store paths that directly or
indirectly refer to one of *paths*. These are all the paths currently indirectly refer to one of *paths*. These are all the paths currently
in the Nix store that are dependent on *paths*. in the Nix store that are dependent on *paths*.
- `--deriver`; `-d` - `--deriver`; `-d`\
Prints the [deriver](../glossary.md) of the store paths *paths*. If Prints the [deriver](../glossary.md) of the store paths *paths*. If
the path has no deriver (e.g., if it is a source file), or if the the path has no deriver (e.g., if it is a source file), or if the
deriver is not known (e.g., in the case of a binary-only deriver is not known (e.g., in the case of a binary-only
deployment), the string `unknown-deriver` is printed. deployment), the string `unknown-deriver` is printed.
- `--graph` - `--graph`\
Prints the references graph of the store paths *paths* in the format Prints the references graph of the store paths *paths* in the format
of the `dot` tool of AT\&T's [Graphviz of the `dot` tool of AT\&T's [Graphviz
package](http://www.graphviz.org/). This can be used to visualise package](http://www.graphviz.org/). This can be used to visualise
@ -364,39 +364,39 @@ symlink.
this to a store derivation. To obtain a runtime dependency graph, this to a store derivation. To obtain a runtime dependency graph,
apply it to an output path. apply it to an output path.
- `--tree` - `--tree`\
Prints the references graph of the store paths *paths* as a nested Prints the references graph of the store paths *paths* as a nested
ASCII tree. References are ordered by descending closure size; this ASCII tree. References are ordered by descending closure size; this
tends to flatten the tree, making it more readable. The query only tends to flatten the tree, making it more readable. The query only
recurses into a store path when it is first encountered; this recurses into a store path when it is first encountered; this
prevents a blowup of the tree representation of the graph. prevents a blowup of the tree representation of the graph.
- `--graphml` - `--graphml`\
Prints the references graph of the store paths *paths* in the Prints the references graph of the store paths *paths* in the
[GraphML](http://graphml.graphdrawing.org/) file format. This can be [GraphML](http://graphml.graphdrawing.org/) file format. This can be
used to visualise dependency graphs. To obtain a build-time used to visualise dependency graphs. To obtain a build-time
dependency graph, apply this to a store derivation. To obtain a dependency graph, apply this to a store derivation. To obtain a
runtime dependency graph, apply it to an output path. runtime dependency graph, apply it to an output path.
- `--binding` *name*; `-b` *name* - `--binding` *name*; `-b` *name*\
Prints the value of the attribute *name* (i.e., environment Prints the value of the attribute *name* (i.e., environment
variable) of the store derivations *paths*. It is an error for a variable) of the store derivations *paths*. It is an error for a
derivation to not have the specified attribute. derivation to not have the specified attribute.
- `--hash` - `--hash`\
Prints the SHA-256 hash of the contents of the store paths *paths* Prints the SHA-256 hash of the contents of the store paths *paths*
(that is, the hash of the output of `nix-store --dump` on the given (that is, the hash of the output of `nix-store --dump` on the given
paths). Since the hash is stored in the Nix database, this is a fast paths). Since the hash is stored in the Nix database, this is a fast
operation. operation.
- `--size` - `--size`\
Prints the size in bytes of the contents of the store paths *paths* Prints the size in bytes of the contents of the store paths *paths*
— to be precise, the size of the output of `nix-store --dump` on — to be precise, the size of the output of `nix-store --dump` on
the given paths. Note that the actual disk space required by the the given paths. Note that the actual disk space required by the
store paths may be higher, especially on filesystems with large store paths may be higher, especially on filesystems with large
cluster sizes. cluster sizes.
- `--roots` - `--roots`\
Prints the garbage collector roots that point, directly or Prints the garbage collector roots that point, directly or
indirectly, at the store paths *paths*. indirectly, at the store paths *paths*.
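A hedged end-to-end sketch of a few of these queries (the `hello` attribute is only an example):

```console
# Instantiate a derivation and inspect it:
$ drv=$(nix-instantiate '<nixpkgs>' -A hello)
$ nix-store --query --outputs "$drv"
$ nix-store --query --references "$drv"
$ nix-store --query --tree "$drv"
# The system attribute and the stored hash of the derivation file itself:
$ nix-store --query --binding system "$drv"
$ nix-store --query --hash "$drv"
```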
@ -513,7 +513,7 @@ public url or broke since the download expression was written.
This operation has the following options: This operation has the following options:
- `--recursive` - `--recursive`\
Use recursive instead of flat hashing mode, used when adding Use recursive instead of flat hashing mode, used when adding
directories to the store. directories to the store.
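For example (the file and directory names are placeholders):

```console
# Add a single file using flat hashing:
$ nix-store --add-fixed sha256 hello-2.1.1.tar.gz
# Add a directory, which requires recursive (NAR) hashing:
$ nix-store --add-fixed --recursive sha256 vendored-sources
```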
@ -540,14 +540,14 @@ being modified by non-Nix tools, or of bugs in Nix itself.
This operation has the following options: This operation has the following options:
- `--check-contents` - `--check-contents`\
Checks that the contents of every valid store path have not been Checks that the contents of every valid store path have not been
altered by computing a SHA-256 hash of the contents and comparing it altered by computing a SHA-256 hash of the contents and comparing it
with the hash stored in the Nix database at build time. Paths that with the hash stored in the Nix database at build time. Paths that
have been modified are printed out. For large stores, have been modified are printed out. For large stores,
`--check-contents` is obviously quite slow. `--check-contents` is obviously quite slow.
- `--repair` - `--repair`\
If any valid path is missing from the store, or (if If any valid path is missing from the store, or (if
`--check-contents` is given) the contents of a valid path have been `--check-contents` is given) the contents of a valid path have been
modified, then try to repair the path by redownloading it. See modified, then try to repair the path by redownloading it. See
View file
@ -2,56 +2,56 @@
Most Nix commands accept the following command-line options: Most Nix commands accept the following command-line options:
- `--help` - `--help`\
Prints out a summary of the command syntax and exits. Prints out a summary of the command syntax and exits.
- `--version` - `--version`\
Prints out the Nix version number on standard output and exits. Prints out the Nix version number on standard output and exits.
- `--verbose` / `-v` - `--verbose` / `-v`\
Increases the level of verbosity of diagnostic messages printed on Increases the level of verbosity of diagnostic messages printed on
standard error. For each Nix operation, the information printed on standard error. For each Nix operation, the information printed on
standard output is well-defined; any diagnostic information is standard output is well-defined; any diagnostic information is
printed on standard error, never on standard output. printed on standard error, never on standard output.
This option may be specified repeatedly. Currently, the following This option may be specified repeatedly. Currently, the following
verbosity levels exist: verbosity levels exist:
- 0 - 0\
“Errors only”: only print messages explaining why the Nix “Errors only”: only print messages explaining why the Nix
invocation failed. invocation failed.
- 1 - 1\
“Informational”: print *useful* messages about what Nix is “Informational”: print *useful* messages about what Nix is
doing. This is the default. doing. This is the default.
- 2 - 2\
“Talkative”: print more informational messages. “Talkative”: print more informational messages.
- 3 - 3\
“Chatty”: print even more informational messages. “Chatty”: print even more informational messages.
- 4 - 4\
“Debug”: print debug information. “Debug”: print debug information.
- 5 - 5\
“Vomit”: print vast amounts of debug information. “Vomit”: print vast amounts of debug information.
- `--quiet` - `--quiet`\
Decreases the level of verbosity of diagnostic messages printed on Decreases the level of verbosity of diagnostic messages printed on
standard error. This is the inverse option to `-v` / `--verbose`. standard error. This is the inverse option to `-v` / `--verbose`.
This option may be specified repeatedly. See the previous verbosity This option may be specified repeatedly. See the previous verbosity
levels list. levels list.
- `--log-format` *format* - `--log-format` *format*\
This option can be used to change the output of the log format, with This option can be used to change the output of the log format, with
*format* being one of: *format* being one of:
- raw - raw\
This is the raw format, as outputted by nix-build. This is the raw format, as outputted by nix-build.
- internal-json - internal-json\
Outputs the logs in a structured manner. Outputs the logs in a structured manner.
> **Warning** > **Warning**
@ -60,30 +60,30 @@ Most Nix commands accept the following command-line options:
> the error-messages (namely of the `msg`-field) can change > the error-messages (namely of the `msg`-field) can change
> between releases. > between releases.
- bar - bar\
Only display a progress bar during the builds. Only display a progress bar during the builds.
- bar-with-logs - bar-with-logs\
Display the raw logs, with the progress bar at the bottom. Display the raw logs, with the progress bar at the bottom.
- `--no-build-output` / `-Q` - `--no-build-output` / `-Q`\
By default, output written by builders to standard output and By default, output written by builders to standard output and
standard error is echoed to the Nix command's standard error. This standard error is echoed to the Nix command's standard error. This
option suppresses this behaviour. Note that the builder's standard option suppresses this behaviour. Note that the builder's standard
output and error are always written to a log file in output and error are always written to a log file in
`prefix/nix/var/log/nix`. `prefix/nix/var/log/nix`.
- `--max-jobs` / `-j` *number* - `--max-jobs` / `-j` *number*\
Sets the maximum number of build jobs that Nix will perform in Sets the maximum number of build jobs that Nix will perform in
parallel to the specified number. Specify `auto` to use the number parallel to the specified number. Specify `auto` to use the number
of CPUs in the system. The default is specified by the `max-jobs` of CPUs in the system. The default is specified by the `max-jobs`
configuration setting, which itself defaults to `1`. A higher configuration setting, which itself defaults to `1`. A higher
value is useful on SMP systems or to exploit I/O latency. value is useful on SMP systems or to exploit I/O latency.
Setting it to `0` disallows building on the local machine, which is Setting it to `0` disallows building on the local machine, which is
useful when you want builds to happen only on remote builders. useful when you want builds to happen only on remote builders.
- `--cores` - `--cores`\
Sets the value of the `NIX_BUILD_CORES` environment variable in Sets the value of the `NIX_BUILD_CORES` environment variable in
the invocation of builders. Builders can use this variable at the invocation of builders. Builders can use this variable at
their discretion to control the maximum amount of parallelism. For their discretion to control the maximum amount of parallelism. For
@ -94,18 +94,18 @@ Most Nix commands accept the following command-line options:
means that the builder should use all available CPU cores in the means that the builder should use all available CPU cores in the
system. system.
- `--max-silent-time` - `--max-silent-time`\
Sets the maximum number of seconds that a builder can go without Sets the maximum number of seconds that a builder can go without
producing any data on standard output or standard error. The producing any data on standard output or standard error. The
default is specified by the `max-silent-time` configuration default is specified by the `max-silent-time` configuration
setting. `0` means no time-out. setting. `0` means no time-out.
- `--timeout` - `--timeout`\
Sets the maximum number of seconds that a builder can run. The Sets the maximum number of seconds that a builder can run. The
default is specified by the `timeout` configuration setting. `0` default is specified by the `timeout` configuration setting. `0`
means no timeout. means no timeout.
- `--keep-going` / `-k` - `--keep-going` / `-k`\
Keep going in case of failed builds, to the greatest extent Keep going in case of failed builds, to the greatest extent
possible. That is, if building an input of some derivation fails, possible. That is, if building an input of some derivation fails,
Nix will still build the other inputs, but not the derivation Nix will still build the other inputs, but not the derivation
@ -113,17 +113,17 @@ Most Nix commands accept the following command-line options:
for builds of substitutes), possibly killing builds in progress (in for builds of substitutes), possibly killing builds in progress (in
case of parallel or distributed builds). case of parallel or distributed builds).
- `--keep-failed` / `-K` - `--keep-failed` / `-K`\
Specifies that in case of a build failure, the temporary directory Specifies that in case of a build failure, the temporary directory
(usually in `/tmp`) in which the build takes place should not be (usually in `/tmp`) in which the build takes place should not be
deleted. The path of the build directory is printed as an deleted. The path of the build directory is printed as an
informational message. informational message.
- `--fallback` - `--fallback`\
Whenever Nix attempts to build a derivation for which substitutes Whenever Nix attempts to build a derivation for which substitutes
are known for each output path, but realising the output paths are known for each output path, but realising the output paths
through the substitutes fails, fall back on building the derivation. through the substitutes fails, fall back on building the derivation.
The most common scenario in which this is useful is when we have The most common scenario in which this is useful is when we have
registered substitutes in order to perform binary distribution from, registered substitutes in order to perform binary distribution from,
say, a network repository. If the repository is down, the say, a network repository. If the repository is down, the
@ -134,12 +134,12 @@ Most Nix commands accept the following command-line options:
failure in obtaining the substitutes to lead to a full build from failure in obtaining the substitutes to lead to a full build from
source (with the related consumption of resources). source (with the related consumption of resources).
- `--readonly-mode` - `--readonly-mode`\
When this option is used, no attempt is made to open the Nix When this option is used, no attempt is made to open the Nix
database. Most Nix operations do need database access, so those database. Most Nix operations do need database access, so those
operations will fail. operations will fail.
- `--arg` *name* *value* - `--arg` *name* *value*\
This option is accepted by `nix-env`, `nix-instantiate`, This option is accepted by `nix-env`, `nix-instantiate`,
`nix-shell` and `nix-build`. When evaluating Nix expressions, the `nix-shell` and `nix-build`. When evaluating Nix expressions, the
expression evaluator will automatically try to call functions that expression evaluator will automatically try to call functions that
@ -151,7 +151,7 @@ Most Nix commands accept the following command-line options:
override a default value). That is, if the evaluator encounters a override a default value). That is, if the evaluator encounters a
function with an argument named *name*, it will call it with value function with an argument named *name*, it will call it with value
*value*. *value*.
For instance, the top-level `default.nix` in Nixpkgs is actually a For instance, the top-level `default.nix` in Nixpkgs is actually a
function: function:
@ -161,7 +161,7 @@ Most Nix commands accept the following command-line options:
... ...
}: ... }: ...
``` ```
So if you call this Nix expression (e.g., when you do `nix-env -i So if you call this Nix expression (e.g., when you do `nix-env -i
pkgname`), the function will be called automatically using the pkgname`), the function will be called automatically using the
value [`builtins.currentSystem`](../expressions/builtins.md) for value [`builtins.currentSystem`](../expressions/builtins.md) for
@ -170,13 +170,13 @@ Most Nix commands accept the following command-line options:
since the argument is a Nix string literal, you have to escape the since the argument is a Nix string literal, you have to escape the
quotes.) quotes.)
- `--argstr` *name* *value* - `--argstr` *name* *value*\
This option is like `--arg`, only the value is not a Nix This option is like `--arg`, only the value is not a Nix
expression but a string. So instead of `--arg system expression but a string. So instead of `--arg system
\"i686-linux\"` (the outer quotes are to keep the shell happy) you \"i686-linux\"` (the outer quotes are to keep the shell happy) you
can say `--argstr system i686-linux`. can say `--argstr system i686-linux`.
- `--attr` / `-A` *attrPath* - `--attr` / `-A` *attrPath*\
Select an attribute from the top-level Nix expression being Select an attribute from the top-level Nix expression being
evaluated. (`nix-env`, `nix-instantiate`, `nix-build` and evaluated. (`nix-env`, `nix-instantiate`, `nix-build` and
`nix-shell` only.) The *attribute path* *attrPath* is a sequence `nix-shell` only.) The *attribute path* *attrPath* is a sequence
@ -185,34 +185,34 @@ Most Nix commands accept the following command-line options:
would cause the expression `e.xorg.xorgserver` to be used. See would cause the expression `e.xorg.xorgserver` to be used. See
[`nix-env --install`](nix-env.md#operation---install) for some [`nix-env --install`](nix-env.md#operation---install) for some
concrete examples. concrete examples.
In addition to attribute names, you can also specify array indices. In addition to attribute names, you can also specify array indices.
For instance, the attribute path `foo.3.bar` selects the `bar` For instance, the attribute path `foo.3.bar` selects the `bar`
attribute of the fourth element of the array in the `foo` attribute attribute of the fourth element of the array in the `foo` attribute
of the top-level expression. of the top-level expression.
- `--expr` / `-E` - `--expr` / `-E`\
Interpret the command line arguments as a list of Nix expressions to Interpret the command line arguments as a list of Nix expressions to
be parsed and evaluated, rather than as a list of file names of Nix be parsed and evaluated, rather than as a list of file names of Nix
expressions. (`nix-instantiate`, `nix-build` and `nix-shell` only.) expressions. (`nix-instantiate`, `nix-build` and `nix-shell` only.)
For `nix-shell`, this option is commonly used to give you a shell in For `nix-shell`, this option is commonly used to give you a shell in
which you can build the packages returned by the expression. If you which you can build the packages returned by the expression. If you
want to get a shell which contains the *built* packages ready for want to get a shell which contains the *built* packages ready for
use, give your expression to the `nix-shell -p` convenience flag use, give your expression to the `nix-shell -p` convenience flag
instead. instead.
- `-I` *path* - `-I` *path*\
Add a path to the Nix expression search path. This option may be Add a path to the Nix expression search path. This option may be
given multiple times. See the `NIX_PATH` environment variable for given multiple times. See the `NIX_PATH` environment variable for
information on the semantics of the Nix search path. Paths added information on the semantics of the Nix search path. Paths added
through `-I` take precedence over `NIX_PATH`. through `-I` take precedence over `NIX_PATH`.
- `--option` *name* *value* - `--option` *name* *value*\
Set the Nix configuration option *name* to *value*. This overrides Set the Nix configuration option *name* to *value*. This overrides
settings in the Nix configuration file (see `nix.conf(5)`). settings in the Nix configuration file (see `nix.conf(5)`).
- `--repair` - `--repair`\
Fix corrupted or missing store paths by redownloading or rebuilding Fix corrupted or missing store paths by redownloading or rebuilding
them. Note that this is slow because it requires computing a them. Note that this is slow because it requires computing a
cryptographic hash of the contents of every path in the closure of cryptographic hash of the contents of every path in the closure of
View file
@ -2,14 +2,14 @@
Derivations can declare some infrequently used optional attributes. Derivations can declare some infrequently used optional attributes.
- `allowedReferences` - `allowedReferences`\
The optional attribute `allowedReferences` specifies a list of legal The optional attribute `allowedReferences` specifies a list of legal
references (dependencies) of the output of the builder. For example, references (dependencies) of the output of the builder. For example,
```nix ```nix
allowedReferences = []; allowedReferences = [];
``` ```
enforces that the output of a derivation cannot have any runtime enforces that the output of a derivation cannot have any runtime
dependencies on its inputs. To allow an output to have a runtime dependencies on its inputs. To allow an output to have a runtime
dependency on itself, use `"out"` as a list item. This is used in dependency on itself, use `"out"` as a list item. This is used in
@ -17,45 +17,45 @@ Derivations can declare some infrequently used optional attributes.
booting Linux don’t have accidental dependencies on other paths in booting Linux don’t have accidental dependencies on other paths in
the Nix store. the Nix store.
- `allowedRequisites` - `allowedRequisites`\
This attribute is similar to `allowedReferences`, but it specifies This attribute is similar to `allowedReferences`, but it specifies
the legal requisites of the whole closure, so all the dependencies the legal requisites of the whole closure, so all the dependencies
recursively. For example, recursively. For example,
```nix ```nix
allowedRequisites = [ foobar ]; allowedRequisites = [ foobar ];
``` ```
enforces that the output of a derivation cannot have any other enforces that the output of a derivation cannot have any other
runtime dependency than `foobar`, and in addition it enforces that runtime dependency than `foobar`, and in addition it enforces that
`foobar` itself doesn't introduce any other dependencies. `foobar` itself doesn't introduce any other dependencies.
- `disallowedReferences` - `disallowedReferences`\
The optional attribute `disallowedReferences` specifies a list of The optional attribute `disallowedReferences` specifies a list of
illegal references (dependencies) of the output of the builder. For illegal references (dependencies) of the output of the builder. For
example, example,
```nix ```nix
disallowedReferences = [ foo ]; disallowedReferences = [ foo ];
``` ```
enforces that the output of a derivation cannot have a direct enforces that the output of a derivation cannot have a direct
runtime dependency on the derivation `foo`. runtime dependency on the derivation `foo`.
- `disallowedRequisites` - `disallowedRequisites`\
This attribute is similar to `disallowedReferences`, but it This attribute is similar to `disallowedReferences`, but it
specifies illegal requisites for the whole closure, so all the specifies illegal requisites for the whole closure, so all the
dependencies recursively. For example, dependencies recursively. For example,
```nix ```nix
disallowedRequisites = [ foobar ]; disallowedRequisites = [ foobar ];
``` ```
enforces that the output of a derivation cannot have any runtime enforces that the output of a derivation cannot have any runtime
dependency on `foobar` or any other derivation depending recursively dependency on `foobar` or any other derivation depending recursively
on `foobar`. on `foobar`.
- `exportReferencesGraph` - `exportReferencesGraph`\
This attribute allows builders access to the references graph of This attribute allows builders access to the references graph of
their inputs. The attribute is a list of inputs in the Nix store their inputs. The attribute is a list of inputs in the Nix store
whose references graph the builder needs to know. The value of whose references graph the builder needs to know. The value of
@ -65,17 +65,17 @@ Derivations can declare some infrequently used optional attributes.
files have the format used by `nix-store --register-validity` files have the format used by `nix-store --register-validity`
(with the deriver fields left empty). For example, when the (with the deriver fields left empty). For example, when the
following derivation is built: following derivation is built:
```nix ```nix
derivation { derivation {
... ...
exportReferencesGraph = [ "libfoo-graph" libfoo ]; exportReferencesGraph = [ "libfoo-graph" libfoo ];
}; };
``` ```
the references graph of `libfoo` is placed in the file the references graph of `libfoo` is placed in the file
`libfoo-graph` in the temporary build directory. `libfoo-graph` in the temporary build directory.
`exportReferencesGraph` is useful for builders that want to do `exportReferencesGraph` is useful for builders that want to do
something with the closure of a store path. Examples include the something with the closure of a store path. Examples include the
builders in NixOS that generate the initial ramdisk for booting builders in NixOS that generate the initial ramdisk for booting
@ -84,66 +84,66 @@ Derivations can declare some infrequently used optional attributes.
with a Nix store containing the closure of a bootable NixOS with a Nix store containing the closure of a bootable NixOS
configuration). configuration).
- `impureEnvVars` - `impureEnvVars`\
This attribute allows you to specify a list of environment variables This attribute allows you to specify a list of environment variables
that should be passed from the environment of the calling user to that should be passed from the environment of the calling user to
the builder. Usually, the environment is cleared completely when the the builder. Usually, the environment is cleared completely when the
builder is executed, but with this attribute you can allow specific builder is executed, but with this attribute you can allow specific
environment variables to be passed unmodified. For example, environment variables to be passed unmodified. For example,
`fetchurl` in Nixpkgs has the line `fetchurl` in Nixpkgs has the line
```nix ```nix
impureEnvVars = [ "http_proxy" "https_proxy" ... ]; impureEnvVars = [ "http_proxy" "https_proxy" ... ];
``` ```
to make it use the proxy server configuration specified by the user to make it use the proxy server configuration specified by the user
in the environment variables `http_proxy` and friends. in the environment variables `http_proxy` and friends.
This attribute is only allowed in *fixed-output derivations* (see This attribute is only allowed in *fixed-output derivations* (see
below), where impurities such as these are okay since (the hash below), where impurities such as these are okay since (the hash
of) the output is known in advance. It is ignored for all other of) the output is known in advance. It is ignored for all other
derivations. derivations.
> **Warning** > **Warning**
> >
> `impureEnvVars` implementation takes environment variables from > `impureEnvVars` implementation takes environment variables from
> the current builder process. When a daemon is building, its > the current builder process. When a daemon is building, its
> environment variables are used. Without the daemon, the > environment variables are used. Without the daemon, the
> environment variables come from the environment of the > environment variables come from the environment of the
> `nix-build`. > `nix-build`.
- `outputHash`; `outputHashAlgo`; `outputHashMode` - `outputHash`; `outputHashAlgo`; `outputHashMode`\
These attributes declare that the derivation is a so-called These attributes declare that the derivation is a so-called
*fixed-output derivation*, which means that a cryptographic hash of *fixed-output derivation*, which means that a cryptographic hash of
the output is already known in advance. When the build of a the output is already known in advance. When the build of a
fixed-output derivation finishes, Nix computes the cryptographic fixed-output derivation finishes, Nix computes the cryptographic
hash of the output and compares it to the hash declared with these hash of the output and compares it to the hash declared with these
attributes. If there is a mismatch, the build fails. attributes. If there is a mismatch, the build fails.
The rationale for fixed-output derivations is derivations such as The rationale for fixed-output derivations is derivations such as
those produced by the `fetchurl` function. This function downloads a those produced by the `fetchurl` function. This function downloads a
file from a given URL. To ensure that the downloaded file has not file from a given URL. To ensure that the downloaded file has not
been modified, the caller must also specify a cryptographic hash of been modified, the caller must also specify a cryptographic hash of
the file. For example, the file. For example,
```nix ```nix
fetchurl { fetchurl {
url = "http://ftp.gnu.org/pub/gnu/hello/hello-2.1.1.tar.gz"; url = "http://ftp.gnu.org/pub/gnu/hello/hello-2.1.1.tar.gz";
sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465"; sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465";
} }
``` ```
It sometimes happens that the URL of the file changes, e.g., because It sometimes happens that the URL of the file changes, e.g., because
servers are reorganised or no longer available. We then must update servers are reorganised or no longer available. We then must update
the call to `fetchurl`, e.g., the call to `fetchurl`, e.g.,
```nix ```nix
fetchurl { fetchurl {
url = "ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz"; url = "ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz";
sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465"; sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465";
} }
``` ```
If a `fetchurl` derivation was treated like a normal derivation, the If a `fetchurl` derivation was treated like a normal derivation, the
output paths of the derivation and *all derivations depending on it* output paths of the derivation and *all derivations depending on it*
would change. For instance, if we were to change the URL of the would change. For instance, if we were to change the URL of the
@ -151,16 +151,16 @@ Derivations can declare some infrequently used optional attributes.
other packages depend) massive rebuilds would be needed. This is other packages depend) massive rebuilds would be needed. This is
unfortunate for a change which we know cannot have a real effect as unfortunate for a change which we know cannot have a real effect as
it propagates upwards through the dependency graph. it propagates upwards through the dependency graph.
For fixed-output derivations, on the other hand, the name of the For fixed-output derivations, on the other hand, the name of the
output path only depends on the `outputHash*` and `name` attributes, output path only depends on the `outputHash*` and `name` attributes,
while all other attributes are ignored for the purpose of computing while all other attributes are ignored for the purpose of computing
the output path. (The `name` attribute is included because it is the output path. (The `name` attribute is included because it is
part of the path.) part of the path.)
As an example, here is the (simplified) Nix expression for As an example, here is the (simplified) Nix expression for
`fetchurl`: `fetchurl`:
```nix ```nix
{ stdenv, curl }: # The curl program is used for downloading. { stdenv, curl }: # The curl program is used for downloading.
@ -180,43 +180,51 @@ Derivations can declare some infrequently used optional attributes.
inherit url; inherit url;
} }
``` ```
The `outputHashAlgo` attribute specifies the hash algorithm used to The `outputHashAlgo` attribute specifies the hash algorithm used to
compute the hash. It can currently be `"sha1"`, `"sha256"` or compute the hash. It can currently be `"sha1"`, `"sha256"` or
`"sha512"`. `"sha512"`.
The `outputHashMode` attribute determines how the hash is computed. The `outputHashMode` attribute determines how the hash is computed.
It must be one of the following two values: It must be one of the following two values:
- `"flat"` - `"flat"`\
The output must be a non-executable regular file. If it isn’t, The output must be a non-executable regular file. If it isn’t,
the build fails. The hash is simply computed over the contents the build fails. The hash is simply computed over the contents
of that file (so it’s equal to what Unix commands like of that file (so it’s equal to what Unix commands like
`sha256sum` or `sha1sum` produce). `sha256sum` or `sha1sum` produce).
This is the default. This is the default.
- `"recursive"` - `"recursive"`\
The hash is computed over the NAR archive dump of the output The hash is computed over the NAR archive dump of the output
(i.e., the result of [`nix-store (i.e., the result of [`nix-store
--dump`](../command-ref/nix-store.md#operation---dump)). In --dump`](../command-ref/nix-store.md#operation---dump)). In
this case, the output can be anything, including a directory this case, the output can be anything, including a directory
tree. tree.
The `outputHash` attribute, finally, must be a string containing The `outputHash` attribute, finally, must be a string containing
the hash in either hexadecimal or base-32 notation. (See the the hash in either hexadecimal or base-32 notation. (See the
[`nix-hash` command](../command-ref/nix-hash.md) for information [`nix-hash` command](../command-ref/nix-hash.md) for information
about converting to and from base-32 notation.) about converting to and from base-32 notation.)
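Putting the three attributes together, a hand-written fixed-output derivation might look like the following minimal sketch. It only mirrors what `fetchurl` sets up for you; the builder command assumes a `curl` binary is reachable from the builder, which `fetchurl` arranges via its `curl` dependency.
```nix
derivation {
  name = "hello-2.1.1.tar.gz";
  system = builtins.currentSystem;

  # Illustrative builder: how the file is produced is irrelevant to the
  # output path, which is fixed by the hash declared below.
  builder = "/bin/sh";
  args = [ "-c" ''curl --fail --location "$url" --output "$out"'' ];
  url = "ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz";

  # The attributes that make this a fixed-output derivation:
  outputHashMode = "flat";   # hash the single output file as-is
  outputHashAlgo = "sha256";
  outputHash = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465";
}
```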
- `__contentAddressed`
If this **experimental** attribute is set to true, then the derivation
outputs will be stored in a content-addressed location rather than the
traditional input-addressed one.
This only has an effect if the `ca-derivations` experimental feature is enabled.
Setting this attribute also requires setting `outputHashMode` and `outputHashAlgo` like for *fixed-output derivations* (see above).
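A minimal sketch of such a derivation, assuming the experimental feature is enabled (all other attribute values are placeholders):
```nix
derivation {
  name = "hello-ca";
  system = builtins.currentSystem;
  builder = "/bin/sh";
  args = [ "-c" "echo hello > $out" ];

  # Store the output at a content-addressed path. Unlike a fixed-output
  # derivation, no outputHash is declared up front.
  __contentAddressed = true;
  outputHashMode = "recursive";
  outputHashAlgo = "sha256";
}
```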
- `passAsFile` - `passAsFile`\
A list of names of attributes that should be passed via files rather A list of names of attributes that should be passed via files rather
than environment variables. For example, if you have than environment variables. For example, if you have
```nix ```nix
passAsFile = ["big"]; passAsFile = ["big"];
big = "a very long string"; big = "a very long string";
``` ```
then when the builder runs, the environment variable `bigPath` then when the builder runs, the environment variable `bigPath`
will contain the absolute path to a temporary file containing `a will contain the absolute path to a temporary file containing `a
very long string`. That is, for any attribute *x* listed in very long string`. That is, for any attribute *x* listed in
@ -226,7 +234,7 @@ Derivations can declare some infrequently used optional attributes.
builder, since most operating systems impose a limit on the size builder, since most operating systems impose a limit on the size
of the environment (typically, a few hundred kilobytes). of the environment (typically, a few hundred kilobytes).
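On the builder side, the value is then read back from that file rather than from the environment. A minimal sketch, assuming a `cat` binary is reachable from the builder:
```nix
derivation {
  name = "pass-as-file-example";
  system = builtins.currentSystem;
  builder = "/bin/sh";

  passAsFile = [ "big" ];
  big = "a very long string";

  # `big` is not exported as an environment variable; instead `bigPath`
  # names a temporary file holding the value, which the builder reads.
  args = [ "-c" ''cat "$bigPath" > "$out"'' ];
}
```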
- `preferLocalBuild` - `preferLocalBuild`\
If this attribute is set to `true` and [distributed building is If this attribute is set to `true` and [distributed building is
enabled](../advanced-topics/distributed-builds.md), then, if enabled](../advanced-topics/distributed-builds.md), then, if
possible, the derivation will be built locally instead of forwarded possible, the derivation will be built locally instead of forwarded
@ -234,14 +242,14 @@ Derivations can declare some infrequently used optional attributes.
where the cost of doing a download or remote build would exceed where the cost of doing a download or remote build would exceed
the cost of building locally. the cost of building locally.
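As a minimal sketch, a trivial derivation (all values here are placeholders) can opt into local building; it also sets `allowSubstitutes`, which is described next:
```nix
derivation {
  name = "tiny-config";
  system = builtins.currentSystem;
  builder = "/bin/sh";
  args = [ "-c" "echo 'listen 8080' > $out" ];

  # Cheaper to build locally than to forward to a remote machine, and
  # cheaper to build than to fetch from a binary cache.
  preferLocalBuild = true;
  allowSubstitutes = false;
}
```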
- `allowSubstitutes` - `allowSubstitutes`\
If this attribute is set to `false`, then Nix will always build this If this attribute is set to `false`, then Nix will always build this
derivation; it will not try to substitute its outputs. This is derivation; it will not try to substitute its outputs. This is
useful for very trivial derivations (such as `writeText` in Nixpkgs) useful for very trivial derivations (such as `writeText` in Nixpkgs)
that are cheaper to build than to substitute from a binary cache. that are cheaper to build than to substitute from a binary cache.
> **Note** > **Note**
> >
> You need to have a builder configured which satisfies the > You need to have a builder configured which satisfies the
> derivation's `system` attribute, since the derivation cannot be > derivation's `system` attribute, since the derivation cannot be
> substituted. Thus it is usually a good idea to align `system` with > substituted. Thus it is usually a good idea to align `system` with
@ -2,7 +2,7 @@
Here are the constants built into the Nix expression evaluator: Here are the constants built into the Nix expression evaluator:
- `builtins` - `builtins`\
The set `builtins` contains all the built-in functions and values. The set `builtins` contains all the built-in functions and values.
You can use `builtins` to test for the availability of features in You can use `builtins` to test for the availability of features in
the Nix installation, e.g., the Nix installation, e.g.,
@ -14,7 +14,7 @@ Here are the constants built into the Nix expression evaluator:
This allows a Nix expression to fall back gracefully on older Nix This allows a Nix expression to fall back gracefully on older Nix
installations that don't have the desired built-in function. installations that don't have the desired built-in function.
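For instance, a feature test of this kind could look like the following sketch:
```nix
# Fall back gracefully if the builtin is missing on an older Nix.
if builtins ? getEnv
then builtins.getEnv "PATH"
else ""
```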
- `builtins.currentSystem` - `builtins.currentSystem`\
The built-in value `currentSystem` evaluates to the Nix platform The built-in value `currentSystem` evaluates to the Nix platform
identifier for the Nix installation on which the expression is being identifier for the Nix installation on which the expression is being
evaluated, such as `"i686-linux"` or `"x86_64-darwin"`. evaluated, such as `"i686-linux"` or `"x86_64-darwin"`.
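As a sketch, an expression can branch on this value at evaluation time (the attribute names and strings below are placeholders):
```nix
# Pick a platform-specific value; fall back if the platform is unknown.
let
  archive = {
    "x86_64-linux"  = "tool-linux-amd64.tar.gz";
    "x86_64-darwin" = "tool-darwin-amd64.tar.gz";
  };
in archive.${builtins.currentSystem} or "unsupported"
```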
@ -9,7 +9,8 @@ scope. Instead, you can access them through the `builtins` built-in
value, which is a set that contains all built-in functions and values. value, which is a set that contains all built-in functions and values.
For instance, `derivation` is also available as `builtins.derivation`. For instance, `derivation` is also available as `builtins.derivation`.
- `derivation` *attrs*; `builtins.derivation` *attrs* <dl>
<dt><code>derivation <var>attrs</var></code>;
`derivation` is described in [its own section](derivations.md). <code>builtins.derivation <var>attrs</var></code></dt>
<dd><p><var>derivation</var> is described in
<a href="derivations.md">its own section</a>.</p></dd>
@ -0,0 +1 @@
</dl>
@ -139,6 +139,13 @@ Nix has the following basic data types:
environment variable `NIX_PATH` will be searched for the given file environment variable `NIX_PATH` will be searched for the given file
or directory name. or directory name.
Antiquotation is supported in any paths except those in angle brackets.
`./${foo}-${bar}.nix` is a more convenient way of writing
`./. + "/" + foo + "-" + bar + ".nix"` or `./. + "/${foo}-${bar}.nix"`. At
least one slash must appear *before* any antiquotations for this to be
recognized as a path. `a.${foo}/b.${bar}` is a syntactically valid division
operation. `./a.${foo}/b.${bar}` is a path.
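A short sketch of the equivalences described above (the variable values are placeholders):
```nix
let
  foo = "app";
  bar = "prod";
in {
  # All three expressions denote the same path:
  short   = ./${foo}-${bar}.nix;
  longer  = ./. + "/${foo}-${bar}.nix";
  longest = ./. + "/" + foo + "-" + bar + ".nix";
}
```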
- *Booleans* with values `true` and `false`. - *Booleans* with values `true` and `false`.
- The null value, denoted as `null`. - The null value, denoted as `null`.
@ -1,48 +1,48 @@
# Glossary # Glossary
- derivation - derivation\
A description of a build action. The result of a derivation is a A description of a build action. The result of a derivation is a
store object. Derivations are typically specified in Nix expressions store object. Derivations are typically specified in Nix expressions
using the [`derivation` primitive](expressions/derivations.md). These are using the [`derivation` primitive](expressions/derivations.md). These are
translated into low-level *store derivations* (implicitly by translated into low-level *store derivations* (implicitly by
`nix-env` and `nix-build`, or explicitly by `nix-instantiate`). `nix-env` and `nix-build`, or explicitly by `nix-instantiate`).
- store - store\
The location in the file system where store objects live. Typically The location in the file system where store objects live. Typically
`/nix/store`. `/nix/store`.
- store path - store path\
The location in the file system of a store object, i.e., an The location in the file system of a store object, i.e., an
immediate child of the Nix store directory. immediate child of the Nix store directory.
- store object - store object\
A file that is an immediate child of the Nix store directory. These A file that is an immediate child of the Nix store directory. These
can be regular files, but also entire directory trees. Store objects can be regular files, but also entire directory trees. Store objects
can be sources (objects copied from outside of the store), can be sources (objects copied from outside of the store),
derivation outputs (objects produced by running a build action), or derivation outputs (objects produced by running a build action), or
derivations (files describing a build action). derivations (files describing a build action).
- substitute - substitute\
A substitute is a command invocation stored in the Nix database that A substitute is a command invocation stored in the Nix database that
describes how to build a store object, bypassing the normal build describes how to build a store object, bypassing the normal build
mechanism (i.e., derivations). Typically, the substitute builds the mechanism (i.e., derivations). Typically, the substitute builds the
store object by downloading a pre-built version of the store object store object by downloading a pre-built version of the store object
from some server. from some server.
- purity - purity\
The assumption that equal Nix derivations when run always produce The assumption that equal Nix derivations when run always produce
the same output. This cannot be guaranteed in general (e.g., a the same output. This cannot be guaranteed in general (e.g., a
builder can rely on external inputs such as the network or the builder can rely on external inputs such as the network or the
system time) but the Nix model assumes it. system time) but the Nix model assumes it.
- Nix expression - Nix expression\
A high-level description of software packages and compositions A high-level description of software packages and compositions
thereof. Deploying software using Nix entails writing Nix thereof. Deploying software using Nix entails writing Nix
expressions for your packages. Nix expressions are translated to expressions for your packages. Nix expressions are translated to
derivations that are stored in the Nix store. These derivations can derivations that are stored in the Nix store. These derivations can
then be built. then be built.
- reference - reference\
A store path `P` is said to have a reference to a store path `Q` if A store path `P` is said to have a reference to a store path `Q` if
the store object at `P` contains the path `Q` somewhere. The the store object at `P` contains the path `Q` somewhere. The
*references* of a store path are the set of store paths to which it *references* of a store path are the set of store paths to which it
@ -52,11 +52,11 @@
output paths), whereas an output path only references other output output paths), whereas an output path only references other output
paths. paths.
- reachable - reachable\
A store path `Q` is reachable from another store path `P` if `Q` A store path `Q` is reachable from another store path `P` if `Q`
is in the *closure* of the *references* relation. is in the *closure* of the *references* relation.
- closure - closure\
The closure of a store path is the set of store paths that are The closure of a store path is the set of store paths that are
directly or indirectly “reachable” from that store path; that is, directly or indirectly “reachable” from that store path; that is,
it's the closure of the path under the *references* relation. For it's the closure of the path under the *references* relation. For
@ -71,29 +71,29 @@
to path `Q`, then `Q` is in the closure of `P`. Further, if `Q` to path `Q`, then `Q` is in the closure of `P`. Further, if `Q`
references `R` then `R` is also in the closure of `P`. references `R` then `R` is also in the closure of `P`.
- output path - output path\
A store path produced by a derivation. A store path produced by a derivation.
- deriver - deriver\
The deriver of an *output path* is the store The deriver of an *output path* is the store
derivation that built it. derivation that built it.
- validity - validity\
A store path is considered *valid* if it exists in the file system, A store path is considered *valid* if it exists in the file system,
is listed in the Nix database as being valid, and if all paths in is listed in the Nix database as being valid, and if all paths in
its closure are also valid. its closure are also valid.
- user environment - user environment\
An automatically generated store object that consists of a set of An automatically generated store object that consists of a set of
symlinks to “active” applications, i.e., other store paths. These symlinks to “active” applications, i.e., other store paths. These
are generated automatically by are generated automatically by
[`nix-env`](command-ref/nix-env.md). See *profiles*. [`nix-env`](command-ref/nix-env.md). See *profiles*.
- profile - profile\
A symlink to the current *user environment* of a user, e.g., A symlink to the current *user environment* of a user, e.g.,
`/nix/var/nix/profiles/default`. `/nix/var/nix/profiles/default`.
- NAR - NAR\
A *N*ix *AR*chive. This is a serialisation of a path in the Nix A *N*ix *AR*chive. This is a serialisation of a path in the Nix
store. It can contain regular files, directories and symbolic store. It can contain regular files, directories and symbolic
links. NARs are generated and unpacked using `nix-store --dump` links. NARs are generated and unpacked using `nix-store --dump`
@ -1,18 +1,26 @@
# Installing a Binary Distribution # Installing a Binary Distribution
If you are using Linux or macOS versions up to 10.14 (Mojave), the The easiest way to install Nix is to run the following command:
easiest way to install Nix is to run the following command:
```console ```console
$ sh <(curl -L https://nixos.org/nix/install) $ sh <(curl -L https://nixos.org/nix/install)
``` ```
If you're using macOS 10.15 (Catalina) or newer, consult [the macOS This will run the installer interactively (causing it to explain what
installation instructions](#macos-installation) before installing. it is doing more explicitly), and perform the default "type" of install
for your platform:
- single-user on Linux
- multi-user on macOS
As of Nix 2.1.0, the Nix installer will always default to creating a > **Notes on read-only filesystem root in macOS 10.15 Catalina +**
single-user installation, however opting in to the multi-user >
installation is highly recommended. > - It took some time to support this cleanly. You may see posts,
> examples, and tutorials using obsolete workarounds.
> - Supporting it cleanly made macOS installs too complex to qualify
> as single-user, so this type is no longer supported on macOS.
We recommend the multi-user install if it supports your platform and
you can authenticate with `sudo`.
# Single User Installation # Single User Installation
@ -50,9 +58,9 @@ $ rm -rf /nix
The multi-user Nix installation creates system users, and a system The multi-user Nix installation creates system users, and a system
service for the Nix daemon. service for the Nix daemon.
- Linux running systemd, with SELinux disabled **Supported Systems**
- Linux running systemd, with SELinux disabled
- macOS - macOS
You can instruct the installer to perform a multi-user installation on You can instruct the installer to perform a multi-user installation on
your system: your system:
@ -96,165 +104,28 @@ sudo rm /Library/LaunchDaemons/org.nixos.nix-daemon.plist
There may also be references to Nix in `/etc/profile`, `/etc/bashrc`, There may also be references to Nix in `/etc/profile`, `/etc/bashrc`,
and `/etc/zshrc` which you may remove. and `/etc/zshrc` which you may remove.
# macOS Installation # macOS Installation <a name="sect-macos-installation-change-store-prefix"></a><a name="sect-macos-installation-encrypted-volume"></a><a name="sect-macos-installation-symlink"></a><a name="sect-macos-installation-recommended-notes"></a>
<!-- Note: anchors above to catch permalinks to old explanations -->
Starting with macOS 10.15 (Catalina), the root filesystem is read-only. We believe we have ironed out how to cleanly support the read-only root
This means `/nix` can no longer live on your system volume, and that on modern macOS. New installs will do this automatically, and you can
you'll need a workaround to install Nix. also re-run a new installer to convert your existing setup.
The recommended approach, which creates an unencrypted APFS volume for This section previously detailed the situation, options, and trade-offs,
your Nix store and a "synthetic" empty directory to mount it over at but it now only outlines what the installer does. You don't need to know
`/nix`, is least likely to impair Nix or your system. this to run the installer, but it may help if you run into trouble:
> **Note** - create a new APFS volume for your Nix store
> - update `/etc/synthetic.conf` to direct macOS to create a "synthetic"
> With all separate-volume approaches, it's possible something on your empty root directory to mount your volume
> system (particularly daemons/services and restored apps) may need - specify mount options for the volume in `/etc/fstab`
> access to your Nix store before the volume is mounted. Adding - if you have FileVault enabled
> additional encryption makes this more likely. - generate an encryption password
- put it in your system Keychain
If you're using a recent Mac with a [T2 - use it to encrypt the volume
chip](https://www.apple.com/euro/mac/shared/docs/Apple_T2_Security_Chip_Overview.pdf), - create a system LaunchDaemon to mount this volume early enough in the
your drive will still be encrypted at rest (in which case "unencrypted" boot process to avoid problems loading or restoring any programs that
is a bit of a misnomer). To use this approach, just install Nix with: need access to your Nix store
```console
$ sh <(curl -L https://nixos.org/nix/install) --darwin-use-unencrypted-nix-store-volume
```
If you don't like the sound of this, you'll want to weigh the other
approaches and tradeoffs detailed in this section.
> **Note**
>
> All of the known workarounds have drawbacks, but we hope better
> solutions will be available in the future. Some that we have our eye
> on are:
>
> 1. A true firmlink would enable the Nix store to live on the primary
> data volume without the build problems caused by the symlink
> approach. End users cannot currently create true firmlinks.
>
> 2. If the Nix store volume shared FileVault encryption with the
> primary data volume (probably by using the same volume group and
> role), FileVault encryption could be easily supported by the
> installer without requiring manual setup by each user.
## Change the Nix store path prefix
Changing the default prefix for the Nix store is a simple approach which
enables you to leave it on your root volume, where it can take full
advantage of FileVault encryption if enabled. Unfortunately, this
approach also opts your device out of some benefits that are enabled by
using the same prefix across systems:
- Your system won't be able to take advantage of the binary cache
(unless someone is able to stand up and support duplicate caching
infrastructure), which means you'll spend more time waiting for
builds.
- It's harder to build and deploy packages to Linux systems.
It would also be possible (and often requested) to just apply this change
ecosystem-wide, but it's an intrusive process that has side effects we
want to avoid for now.
## Use a separate encrypted volume
If you like, you can also add encryption to the recommended approach
taken by the installer. You can do this by pre-creating an encrypted
volume before you run the installer--or you can run the installer and
encrypt the volume it creates later.
In either case, adding encryption to a second volume isn't quite as
simple as enabling FileVault for your boot volume. Before you dive in,
there are a few things to weigh:
1. The additional volume won't be encrypted with your existing
FileVault key, so you'll need another mechanism to decrypt the
volume.
2. You can store the password in Keychain to automatically decrypt the
volume on boot--but it'll have to wait on Keychain and may not mount
before your GUI apps restore. If any of your launchd agents or apps
depend on Nix-installed software (for example, if you use a
Nix-installed login shell), the restore may fail or break.
On a case-by-case basis, you may be able to work around this problem
by using `wait4path` to block execution until your executable is
available.
It's also possible to decrypt and mount the volume earlier with a
login hook--but this mechanism appears to be deprecated and its
future is unclear.
3. You can hard-code the password in the clear, so that your store
volume can be decrypted before Keychain is available.
If you are comfortable navigating these tradeoffs, you can encrypt the
volume with something along the lines of:
```console
$ diskutil apfs enableFileVault /nix -user disk
```
## Symlink the Nix store to a custom location
Another simple approach is using `/etc/synthetic.conf` to symlink the
Nix store to the data volume. This option also enables your store to
share any configured FileVault encryption. Unfortunately, builds that
resolve the symlink may leak the canonical path or even fail.
Because of these downsides, we can't recommend this approach.
## Notes on the recommended approach
This section goes into a little more detail on the recommended approach.
You don't need to understand it to run the installer, but it can serve
as a helpful reference if you run into trouble.
1. In order to compose user-writable locations into the new read-only
system root, Apple introduced a new concept called `firmlinks`,
which it describes as a "bi-directional wormhole" between two
filesystems. You can see the current firmlinks in
`/usr/share/firmlinks`. Unfortunately, firmlinks aren't (currently?)
user-configurable.
For special cases like NFS mount points or package manager roots,
[synthetic.conf(5)](https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man5/synthetic.conf.5.html)
supports limited user-controlled file-creation (of symlinks, and
synthetic empty directories) at `/`. To create a synthetic empty
directory for mounting at `/nix`, add the following line to
`/etc/synthetic.conf` (create it if necessary):
nix
2. This configuration is applied at boot time, but you can use
`apfs.util` to trigger creation (not deletion) of new entries
without a reboot:
```console
$ /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -B
```
3. Create the new APFS volume with diskutil:
```console
$ sudo diskutil apfs addVolume diskX APFS 'Nix Store' -mountpoint /nix
```
4. Using `vifs`, add the new mount to `/etc/fstab`. If it doesn't
already have other entries, it should look something like:
#
# Warning - this file should only be modified with vifs(8)
#
# Failure to do so is unsupported and may be destructive.
#
LABEL=Nix\040Store /nix apfs rw,nobrowse
The nobrowse setting will keep Spotlight from indexing this volume,
and keep it from showing up on your desktop.
# Installing a pinned Nix version from a URL # Installing a pinned Nix version from a URL
@ -26,15 +26,6 @@
available for download from the official repository available for download from the official repository
<https://github.com/google/brotli>. <https://github.com/google/brotli>.
- The bzip2 compressor program and the `libbz2` library. Thus you must
have bzip2 installed, including development headers and libraries.
If your distribution does not provide these, you can obtain bzip2
from
<https://sourceware.org/bzip2/>.
- `liblzma`, which is provided by XZ Utils. If your distribution does
not provide this, you can get it from <https://tukaani.org/xz/>.
- cURL and its library. If your distribution does not provide it, you - cURL and its library. If your distribution does not provide it, you
can get it from <https://curl.haxx.se/>. can get it from <https://curl.haxx.se/>.
@ -69,6 +60,3 @@
`--disable-seccomp-sandboxing` option to the `configure` script (Not `--disable-seccomp-sandboxing` option to the `configure` script (Not
recommended unless your system doesn't support `libseccomp`). To get recommended unless your system doesn't support `libseccomp`). To get
the library, visit <https://github.com/seccomp/libseccomp>. the library, visit <https://github.com/seccomp/libseccomp>.
- Niels Lohmann's [JSON library](https://github.com/nlohmann/json).
@ -7,17 +7,17 @@ cache mechanism that Nix usually uses to fetch prebuilt binaries from
The following options can be specified as URL parameters to the S3 URL: The following options can be specified as URL parameters to the S3 URL:
- `profile` - `profile`\
The name of the AWS configuration profile to use. By default Nix The name of the AWS configuration profile to use. By default Nix
will use the `default` profile. will use the `default` profile.
- `region` - `region`\
The region of the S3 bucket. `us-east-1` by default. The region of the S3 bucket. `us-east-1` by default.
If your bucket is not in `us-east-1`, you should always explicitly If your bucket is not in `us-east-1`, you should always explicitly
specify the region parameter. specify the region parameter.
- `endpoint` - `endpoint`\
The URL to your S3-compatible service, for when not using Amazon S3. The URL to your S3-compatible service, for when not using Amazon S3.
Do not specify this value if you're using Amazon S3. Do not specify this value if you're using Amazon S3.
@ -26,7 +26,7 @@ The following options can be specified as URL parameters to the S3 URL:
> This endpoint must support HTTPS and will use path-based > This endpoint must support HTTPS and will use path-based
> addressing instead of virtual host based addressing. > addressing instead of virtual host based addressing.
- `scheme` - `scheme`\
The scheme used for S3 requests, `https` (default) or `http`. This The scheme used for S3 requests, `https` (default) or `http`. This
option allows you to disable HTTPS for binary caches which don't option allows you to disable HTTPS for binary caches which don't
support it. support it.
@ -1,22 +1,39 @@
{ {
"nodes": { "nodes": {
"lowdown-src": {
"flake": false,
"locked": {
"lastModified": 1632468475,
"narHash": "sha256-NNOm9CbdA8cuwbvaBHslGbPTiU6bh1Ao+MpEPx4rSGo=",
"owner": "kristapsdz",
"repo": "lowdown",
"rev": "6bd668af3fd098bdd07a1bedd399564141e275da",
"type": "github"
},
"original": {
"owner": "kristapsdz",
"repo": "lowdown",
"type": "github"
}
},
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1614309161, "lastModified": 1632864508,
"narHash": "sha256-93kRxDPyEW9QIpxU71kCaV1r+hgOgP6/aVgC7vvO8IU=", "narHash": "sha256-d127FIvGR41XbVRDPVvozUPQ/uRHbHwvfyKHwEt5xFM=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "0e499fde7af3c28d63e9b13636716b86c3162b93", "rev": "82891b5e2c2359d7e58d08849e4c89511ab94234",
"type": "github" "type": "github"
}, },
"original": { "original": {
"id": "nixpkgs", "id": "nixpkgs",
"ref": "nixos-20.09-small", "ref": "nixos-21.05-small",
"type": "indirect" "type": "indirect"
} }
}, },
"root": { "root": {
"inputs": { "inputs": {
"lowdown-src": "lowdown-src",
"nixpkgs": "nixpkgs" "nixpkgs": "nixpkgs"
} }
} }
flake.nix
@ -1,10 +1,10 @@
{ {
description = "The purely functional package manager"; description = "The purely functional package manager";
inputs.nixpkgs.url = "nixpkgs/nixos-20.09-small"; inputs.nixpkgs.url = "nixpkgs/nixos-21.05-small";
#inputs.lowdown-src = { url = "github:kristapsdz/lowdown"; flake = false; }; inputs.lowdown-src = { url = "github:kristapsdz/lowdown"; flake = false; };
outputs = { self, nixpkgs }: outputs = { self, nixpkgs, lowdown-src }:
let let
@ -18,7 +18,9 @@
linux64BitSystems = [ "x86_64-linux" "aarch64-linux" ]; linux64BitSystems = [ "x86_64-linux" "aarch64-linux" ];
linuxSystems = linux64BitSystems ++ [ "i686-linux" ]; linuxSystems = linux64BitSystems ++ [ "i686-linux" ];
systems = linuxSystems ++ [ "x86_64-darwin" ]; systems = linuxSystems ++ [ "x86_64-darwin" "aarch64-darwin" ];
crossSystems = [ "armv6l-linux" "armv7l-linux" ];
forAllSystems = f: nixpkgs.lib.genAttrs systems (system: f system); forAllSystems = f: nixpkgs.lib.genAttrs systems (system: f system);
@ -68,7 +70,7 @@
[ [
buildPackages.bison buildPackages.bison
buildPackages.flex buildPackages.flex
(lib.getBin buildPackages.lowdown) (lib.getBin buildPackages.lowdown-nix)
buildPackages.mdbook buildPackages.mdbook
buildPackages.autoconf-archive buildPackages.autoconf-archive
buildPackages.autoreconfHook buildPackages.autoreconfHook
@ -78,21 +80,21 @@
buildPackages.git buildPackages.git
buildPackages.mercurial buildPackages.mercurial
buildPackages.jq buildPackages.jq
]; ]
++ lib.optionals stdenv.hostPlatform.isLinux [(buildPackages.util-linuxMinimal or buildPackages.utillinuxMinimal)];
buildDeps = buildDeps =
[ curl [ curl
bzip2 xz brotli zlib editline bzip2 xz brotli editline
openssl sqlite openssl sqlite
libarchive libarchive
boost boost
nlohmann_json lowdown-nix
lowdown
gmock gmock
] ]
++ lib.optionals stdenv.isLinux [libseccomp (pkgs.util-linuxMinimal or pkgs.utillinuxMinimal)] ++ lib.optionals stdenv.isLinux [libseccomp]
++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
++ lib.optional stdenv.isx86_64 libcpuid; ++ lib.optional stdenv.hostPlatform.isx86_64 libcpuid;
awsDeps = lib.optional (stdenv.isLinux || stdenv.isDarwin) awsDeps = lib.optional (stdenv.isLinux || stdenv.isDarwin)
(aws-sdk-cpp.override { (aws-sdk-cpp.override {
@ -101,7 +103,13 @@
}); });
propagatedDeps = propagatedDeps =
[ (boehmgc.override { enableLargeConfig = true; }) [ ((boehmgc.override {
enableLargeConfig = true;
}).overrideAttrs(o: {
patches = (o.patches or []) ++ [
./boehmgc-coroutine-sp-fallback.diff
];
}))
]; ];
perlDeps = perlDeps =
@ -132,10 +140,11 @@
substitute ${./scripts/install.in} $out/install \ substitute ${./scripts/install.in} $out/install \
${pkgs.lib.concatMapStrings ${pkgs.lib.concatMapStrings
(system: (system: let
'' \ tarball = if builtins.elem system crossSystems then self.hydraJobs.binaryTarballCross.x86_64-linux.${system} else self.hydraJobs.binaryTarball.${system};
--replace '@tarballHash_${system}@' $(nix --experimental-features nix-command hash-file --base16 --type sha256 ${self.hydraJobs.binaryTarball.${system}}/*.tar.xz) \ in '' \
--replace '@tarballPath_${system}@' $(tarballPath ${self.hydraJobs.binaryTarball.${system}}/*.tar.xz) \ --replace '@tarballHash_${system}@' $(nix --experimental-features nix-command hash-file --base16 --type sha256 ${tarball}/*.tar.xz) \
--replace '@tarballPath_${system}@' $(tarballPath ${tarball}/*.tar.xz) \
'' ''
) )
systems systems
@ -169,21 +178,92 @@
installPhase = '' installPhase = ''
mkdir -p $out mkdir -p $out
''; '';
installCheckPhase = "make installcheck";
installCheckPhase = "make installcheck -j$NIX_BUILD_CORES -l$NIX_BUILD_CORES";
}; };
binaryTarball = buildPackages: nix: pkgs: let
inherit (pkgs) cacert;
installerClosureInfo = buildPackages.closureInfo { rootPaths = [ nix cacert ]; };
in
buildPackages.runCommand "nix-binary-tarball-${version}"
{ #nativeBuildInputs = lib.optional (system != "aarch64-linux") shellcheck;
meta.description = "Distribution-independent Nix bootstrap binaries for ${pkgs.system}";
}
''
cp ${installerClosureInfo}/registration $TMPDIR/reginfo
cp ${./scripts/create-darwin-volume.sh} $TMPDIR/create-darwin-volume.sh
substitute ${./scripts/install-nix-from-closure.sh} $TMPDIR/install \
--subst-var-by nix ${nix} \
--subst-var-by cacert ${cacert}
substitute ${./scripts/install-darwin-multi-user.sh} $TMPDIR/install-darwin-multi-user.sh \
--subst-var-by nix ${nix} \
--subst-var-by cacert ${cacert}
substitute ${./scripts/install-systemd-multi-user.sh} $TMPDIR/install-systemd-multi-user.sh \
--subst-var-by nix ${nix} \
--subst-var-by cacert ${cacert}
substitute ${./scripts/install-multi-user.sh} $TMPDIR/install-multi-user \
--subst-var-by nix ${nix} \
--subst-var-by cacert ${cacert}
if type -p shellcheck; then
# SC1090: Don't worry about not being able to find
# $nix/etc/profile.d/nix.sh
shellcheck --exclude SC1090 $TMPDIR/install
shellcheck $TMPDIR/create-darwin-volume.sh
shellcheck $TMPDIR/install-darwin-multi-user.sh
shellcheck $TMPDIR/install-systemd-multi-user.sh
# SC1091: Don't panic about not being able to source
# /etc/profile
# SC2002: Ignore "useless cat" "error", when loading
# .reginfo, as the cat is a much cleaner
# implementation, even though it is "useless"
# SC2116: Allow ROOT_HOME=$(echo ~root) for resolving
# root's home directory
shellcheck --external-sources \
--exclude SC1091,SC2002,SC2116 $TMPDIR/install-multi-user
fi
chmod +x $TMPDIR/install
chmod +x $TMPDIR/create-darwin-volume.sh
chmod +x $TMPDIR/install-darwin-multi-user.sh
chmod +x $TMPDIR/install-systemd-multi-user.sh
chmod +x $TMPDIR/install-multi-user
dir=nix-${version}-${pkgs.system}
fn=$out/$dir.tar.xz
mkdir -p $out/nix-support
echo "file binary-dist $fn" >> $out/nix-support/hydra-build-products
tar cvfJ $fn \
--owner=0 --group=0 --mode=u+rw,uga+r \
--absolute-names \
--hard-dereference \
--transform "s,$TMPDIR/install,$dir/install," \
--transform "s,$TMPDIR/create-darwin-volume.sh,$dir/create-darwin-volume.sh," \
--transform "s,$TMPDIR/reginfo,$dir/.reginfo," \
--transform "s,$NIX_STORE,$dir/store,S" \
$TMPDIR/install \
$TMPDIR/create-darwin-volume.sh \
$TMPDIR/install-darwin-multi-user.sh \
$TMPDIR/install-systemd-multi-user.sh \
$TMPDIR/install-multi-user \
$TMPDIR/reginfo \
$(cat ${installerClosureInfo}/store-paths)
'';
in { in {
# A Nixpkgs overlay that overrides the 'nix' and # A Nixpkgs overlay that overrides the 'nix' and
# 'nix.perl-bindings' packages. # 'nix.perl-bindings' packages.
overlay = final: prev: { overlay = final: prev: {
# An older version of Nix to test against when using the daemon.
# Currently using `nixUnstable` as the stable one doesn't respect
# `NIX_DAEMON_SOCKET_PATH` which is needed for the tests.
nixStable = prev.nix; nixStable = prev.nix;
# Forward from the previous stage as we dont want it to pick the lowdown override
nixUnstable = prev.nixUnstable;
nix = with final; with commonDeps pkgs; stdenv.mkDerivation { nix = with final; with commonDeps pkgs; stdenv.mkDerivation {
name = "nix-${version}"; name = "nix-${version}";
inherit version; inherit version;
@ -233,6 +313,8 @@
separateDebugInfo = true; separateDebugInfo = true;
strictDeps = true;
passthru.perl-bindings = with final; stdenv.mkDerivation { passthru.perl-bindings = with final; stdenv.mkDerivation {
name = "nix-perl-${version}"; name = "nix-perl-${version}";
@ -251,9 +333,9 @@
xz xz
pkgs.perl pkgs.perl
boost boost
nlohmann_json
] ]
++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium; ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
++ lib.optional stdenv.isDarwin darwin.apple_sdk.frameworks.Security;
configureFlags = '' configureFlags = ''
--with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix} --with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix}
@ -267,22 +349,17 @@
}; };
lowdown = with final; stdenv.mkDerivation rec { lowdown-nix = with final; stdenv.mkDerivation rec {
name = "lowdown-0.8.0"; name = "lowdown-0.9.0";
src = fetchurl { src = lowdown-src;
url = "https://kristaps.bsd.lv/lowdown/snapshots/${name}.tar.gz";
hash = "sha512-U9WeGoInT9vrawwa57t6u9dEdRge4/P+0wLxmQyOL9nhzOEUU2FRz2Be9H0dCjYE7p2v3vCXIYk40M+jjULATw==";
};
#src = lowdown-src;
outputs = [ "out" "bin" "dev" ]; outputs = [ "out" "bin" "dev" ];
nativeBuildInputs = [ which ]; nativeBuildInputs = [ buildPackages.which ];
configurePhase = configurePhase = ''
'' ${if (stdenv.isDarwin && stdenv.isAarch64) then "echo \"HAVE_SANDBOX_INIT=false\" > configure.local" else ""}
./configure \ ./configure \
PREFIX=${placeholder "dev"} \ PREFIX=${placeholder "dev"} \
BINDIR=${placeholder "bin"}/bin BINDIR=${placeholder "bin"}/bin
@ -298,92 +375,33 @@
buildStatic = nixpkgs.lib.genAttrs linux64BitSystems (system: self.packages.${system}.nix-static); buildStatic = nixpkgs.lib.genAttrs linux64BitSystems (system: self.packages.${system}.nix-static);
buildCross = nixpkgs.lib.genAttrs crossSystems (crossSystem:
nixpkgs.lib.genAttrs ["x86_64-linux"] (system: self.packages.${system}."nix-${crossSystem}"));
# Perl bindings for various platforms. # Perl bindings for various platforms.
perlBindings = nixpkgs.lib.genAttrs systems (system: self.packages.${system}.nix.perl-bindings); perlBindings = nixpkgs.lib.genAttrs systems (system: self.packages.${system}.nix.perl-bindings);
# Binary tarball for various platforms, containing a Nix store # Binary tarball for various platforms, containing a Nix store
# with the closure of 'nix' package, and the second half of # with the closure of 'nix' package, and the second half of
# the installation script. # the installation script.
binaryTarball = nixpkgs.lib.genAttrs systems (system: binaryTarball = nixpkgs.lib.genAttrs systems (system: binaryTarball nixpkgsFor.${system} nixpkgsFor.${system}.nix nixpkgsFor.${system});
with nixpkgsFor.${system}; binaryTarballCross = nixpkgs.lib.genAttrs ["x86_64-linux"] (system: builtins.listToAttrs (map (crossSystem: {
name = crossSystem;
let value = let
installerClosureInfo = closureInfo { rootPaths = [ nix cacert ]; }; nixpkgsCross = import nixpkgs {
in inherit system crossSystem;
overlays = [ self.overlay ];
runCommand "nix-binary-tarball-${version}" };
{ #nativeBuildInputs = lib.optional (system != "aarch64-linux") shellcheck; in binaryTarball nixpkgsFor.${system} self.packages.${system}."nix-${crossSystem}" nixpkgsCross;
meta.description = "Distribution-independent Nix bootstrap binaries for ${system}"; }) crossSystems));
}
''
cp ${installerClosureInfo}/registration $TMPDIR/reginfo
cp ${./scripts/create-darwin-volume.sh} $TMPDIR/create-darwin-volume.sh
substitute ${./scripts/install-nix-from-closure.sh} $TMPDIR/install \
--subst-var-by nix ${nix} \
--subst-var-by cacert ${cacert}
substitute ${./scripts/install-darwin-multi-user.sh} $TMPDIR/install-darwin-multi-user.sh \
--subst-var-by nix ${nix} \
--subst-var-by cacert ${cacert}
substitute ${./scripts/install-systemd-multi-user.sh} $TMPDIR/install-systemd-multi-user.sh \
--subst-var-by nix ${nix} \
--subst-var-by cacert ${cacert}
substitute ${./scripts/install-multi-user.sh} $TMPDIR/install-multi-user \
--subst-var-by nix ${nix} \
--subst-var-by cacert ${cacert}
if type -p shellcheck; then
# SC1090: Don't worry about not being able to find
# $nix/etc/profile.d/nix.sh
shellcheck --exclude SC1090 $TMPDIR/install
shellcheck $TMPDIR/create-darwin-volume.sh
shellcheck $TMPDIR/install-darwin-multi-user.sh
shellcheck $TMPDIR/install-systemd-multi-user.sh
# SC1091: Don't panic about not being able to source
# /etc/profile
# SC2002: Ignore "useless cat" "error", when loading
# .reginfo, as the cat is a much cleaner
# implementation, even though it is "useless"
# SC2116: Allow ROOT_HOME=$(echo ~root) for resolving
# root's home directory
shellcheck --external-sources \
--exclude SC1091,SC2002,SC2116 $TMPDIR/install-multi-user
fi
chmod +x $TMPDIR/install
chmod +x $TMPDIR/create-darwin-volume.sh
chmod +x $TMPDIR/install-darwin-multi-user.sh
chmod +x $TMPDIR/install-systemd-multi-user.sh
chmod +x $TMPDIR/install-multi-user
dir=nix-${version}-${system}
fn=$out/$dir.tar.xz
mkdir -p $out/nix-support
echo "file binary-dist $fn" >> $out/nix-support/hydra-build-products
tar cvfJ $fn \
--owner=0 --group=0 --mode=u+rw,uga+r \
--absolute-names \
--hard-dereference \
--transform "s,$TMPDIR/install,$dir/install," \
--transform "s,$TMPDIR/create-darwin-volume.sh,$dir/create-darwin-volume.sh," \
--transform "s,$TMPDIR/reginfo,$dir/.reginfo," \
--transform "s,$NIX_STORE,$dir/store,S" \
$TMPDIR/install \
$TMPDIR/create-darwin-volume.sh \
$TMPDIR/install-darwin-multi-user.sh \
$TMPDIR/install-systemd-multi-user.sh \
$TMPDIR/install-multi-user \
$TMPDIR/reginfo \
$(cat ${installerClosureInfo}/store-paths)
'');
# The first half of the installation script. This is uploaded # The first half of the installation script. This is uploaded
# to https://nixos.org/nix/install. It downloads the binary # to https://nixos.org/nix/install. It downloads the binary
# tarball for the user's system and calls the second half of the # tarball for the user's system and calls the second half of the
# installation script. # installation script.
installerScript = installScriptFor [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ]; installerScript = installScriptFor [ "x86_64-linux" "i686-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin" "armv6l-linux" "armv7l-linux" ];
installerScriptForGHA = installScriptFor [ "x86_64-linux" "x86_64-darwin" ]; installerScriptForGHA = installScriptFor [ "x86_64-linux" "x86_64-darwin" "armv6l-linux" "armv7l-linux"];
# Line coverage analysis. # Line coverage analysis.
coverage = coverage =
@ -481,7 +499,7 @@
packages = forAllSystems (system: { packages = forAllSystems (system: {
inherit (nixpkgsFor.${system}) nix; inherit (nixpkgsFor.${system}) nix;
} // nixpkgs.lib.optionalAttrs (builtins.elem system linux64BitSystems) { } // (nixpkgs.lib.optionalAttrs (builtins.elem system linux64BitSystems) {
nix-static = let nix-static = let
nixpkgs = nixpkgsFor.${system}.pkgsStatic; nixpkgs = nixpkgsFor.${system}.pkgsStatic;
in with commonDeps nixpkgs; nixpkgs.stdenv.mkDerivation { in with commonDeps nixpkgs; nixpkgs.stdenv.mkDerivation {
@ -517,8 +535,51 @@
installCheckFlags = "sysconfdir=$(out)/etc"; installCheckFlags = "sysconfdir=$(out)/etc";
stripAllList = ["bin"]; stripAllList = ["bin"];
strictDeps = true;
hardeningDisable = [ "pie" ];
}; };
}); } // builtins.listToAttrs (map (crossSystem: {
name = "nix-${crossSystem}";
value = let
nixpkgsCross = import nixpkgs {
inherit system crossSystem;
overlays = [ self.overlay ];
};
in with commonDeps nixpkgsCross; nixpkgsCross.stdenv.mkDerivation {
name = "nix-${version}";
src = self;
VERSION_SUFFIX = versionSuffix;
outputs = [ "out" "dev" "doc" ];
nativeBuildInputs = nativeBuildDeps;
buildInputs = buildDeps ++ propagatedDeps;
configureFlags = [ "--sysconfdir=/etc" "--disable-doc-gen" ];
enableParallelBuilding = true;
makeFlags = "profiledir=$(out)/etc/profile.d";
doCheck = true;
installFlags = "sysconfdir=$(out)/etc";
postInstall = ''
mkdir -p $doc/nix-support
echo "doc manual $doc/share/doc/nix/manual" >> $doc/nix-support/hydra-build-products
mkdir -p $out/nix-support
echo "file binary-dist $out/bin/nix" >> $out/nix-support/hydra-build-products
'';
doInstallCheck = true;
installCheckFlags = "sysconfdir=$(out)/etc";
};
}) crossSystems)));
defaultPackage = forAllSystems (system: self.packages.${system}.nix); defaultPackage = forAllSystems (system: self.packages.${system}.nix);
@ -83,12 +83,12 @@ sub downloadFile {
if (!-e $tmpFile) { if (!-e $tmpFile) {
print STDERR "downloading $srcFile to $tmpFile...\n"; print STDERR "downloading $srcFile to $tmpFile...\n";
system("NIX_REMOTE=https://cache.nixos.org/ nix cat-store '$srcFile' > '$tmpFile'") == 0 system("NIX_REMOTE=https://cache.nixos.org/ nix store cat '$srcFile' > '$tmpFile'") == 0
or die "unable to fetch $srcFile\n"; or die "unable to fetch $srcFile\n";
} }
my $sha256_expected = $buildInfo->{buildproducts}->{$productNr}->{sha256hash} or die; my $sha256_expected = $buildInfo->{buildproducts}->{$productNr}->{sha256hash} or die;
my $sha256_actual = `nix hash-file --base16 --type sha256 '$tmpFile'`; my $sha256_actual = `nix hash file --base16 --type sha256 '$tmpFile'`;
chomp $sha256_actual; chomp $sha256_actual;
if ($sha256_expected ne $sha256_actual) { if ($sha256_expected ne $sha256_actual) {
print STDERR "file $tmpFile is corrupt, got $sha256_actual, expected $sha256_expected\n"; print STDERR "file $tmpFile is corrupt, got $sha256_actual, expected $sha256_expected\n";
@ -110,6 +110,9 @@ downloadFile("binaryTarball.i686-linux", "1");
downloadFile("binaryTarball.x86_64-linux", "1"); downloadFile("binaryTarball.x86_64-linux", "1");
downloadFile("binaryTarball.aarch64-linux", "1"); downloadFile("binaryTarball.aarch64-linux", "1");
downloadFile("binaryTarball.x86_64-darwin", "1"); downloadFile("binaryTarball.x86_64-darwin", "1");
downloadFile("binaryTarball.aarch64-darwin", "1");
downloadFile("binaryTarballCross.x86_64-linux.armv6l-linux", "1");
downloadFile("binaryTarballCross.x86_64-linux.armv7l-linux", "1");
downloadFile("installerScript", "1"); downloadFile("installerScript", "1");
for my $fn (glob "$tmpDir/*") { for my $fn (glob "$tmpDir/*") {
@ -133,20 +136,8 @@ for my $fn (glob "$tmpDir/*") {
exit if $version =~ /pre/; exit if $version =~ /pre/;
# Update Nixpkgs in a very hacky way. # Update nix-fallback-paths.nix.
system("cd $nixpkgsDir && git pull") == 0 or die; system("cd $nixpkgsDir && git pull") == 0 or die;
my $oldName = `nix-instantiate --eval $nixpkgsDir -A nix.name`; chomp $oldName;
my $oldHash = `nix-instantiate --eval $nixpkgsDir -A nix.src.outputHash`; chomp $oldHash;
print STDERR "old stable version in Nixpkgs = $oldName / $oldHash\n";
my $fn = "$nixpkgsDir/pkgs/tools/package-management/nix/default.nix";
my $oldFile = read_file($fn);
$oldFile =~ s/$oldName/"$releaseName"/g;
$oldFile =~ s/$oldHash/"$tarballHash"/g;
write_file($fn, $oldFile);
$oldName =~ s/nix-//g;
$oldName =~ s/"//g;
sub getStorePath { sub getStorePath {
my ($jobName) = @_; my ($jobName) = @_;
@ -165,9 +156,10 @@ write_file("$nixpkgsDir/nixos/modules/installer/tools/nix-fallback-paths.nix",
" i686-linux = \"" . getStorePath("build.i686-linux") . "\";\n" . " i686-linux = \"" . getStorePath("build.i686-linux") . "\";\n" .
" aarch64-linux = \"" . getStorePath("build.aarch64-linux") . "\";\n" . " aarch64-linux = \"" . getStorePath("build.aarch64-linux") . "\";\n" .
" x86_64-darwin = \"" . getStorePath("build.x86_64-darwin") . "\";\n" . " x86_64-darwin = \"" . getStorePath("build.x86_64-darwin") . "\";\n" .
" aarch64-darwin = \"" . getStorePath("build.aarch64-darwin") . "\";\n" .
"}\n"); "}\n");
system("cd $nixpkgsDir && git commit -a -m 'nix: $oldName -> $version'") == 0 or die; system("cd $nixpkgsDir && git commit -a -m 'nix-fallback-paths.nix: Update to $version'") == 0 or die;
# Update the "latest" symlink. # Update the "latest" symlink.
$channelsBucket->add_key( $channelsBucket->add_key(
misc/fish/completion.fish Normal file
@ -0,0 +1,37 @@
function _nix_complete
# Get the current command up to a cursor.
# - Behaves correctly even with pipes and nested in commands like env.
# - TODO: Returns the command verbatim (does not interpolate variables).
# That might not be optimal for arguments like -f.
set -l nix_args (commandline --current-process --tokenize --cut-at-cursor)
# --cut-at-cursor with --tokenize removes the current token so we need to add it separately.
# https://github.com/fish-shell/fish-shell/issues/7375
# Can be an empty string.
set -l current_token (commandline --current-token --cut-at-cursor)
# Nix wants the index of the argv item to complete but the $nix_args variable
# also contains the program name (argv[0]) so we would need to subtract 1.
# But the variable also misses the current token so it cancels out.
set -l nix_arg_to_complete (count $nix_args)
env NIX_GET_COMPLETIONS=$nix_arg_to_complete $nix_args $current_token
end
function _nix_accepts_files
set -l response (_nix_complete)
# First line is either filenames or no-filenames.
test $response[1] = 'filenames'
end
function _nix
set -l response (_nix_complete)
# Skip the first line since it is handled by _nix_accepts_files.
# Tail lines each contain a command followed by a tab character and, optionally, a description.
# This is also the format fish expects.
string collect -- $response[2..-1]
end
# Disable file path completion if paths do not belong in the current context.
complete --command nix --condition 'not _nix_accepts_files' --no-files
complete --command nix --arguments '(_nix)'
misc/fish/local.mk Normal file
@ -0,0 +1 @@
$(eval $(call install-file-as, $(d)/completion.fish, $(datarootdir)/fish/vendor_completions.d/nix.fish, 0644))
@ -1,4 +1,4 @@
ifeq ($(OS), Darwin) ifdef HOST_DARWIN
$(eval $(call install-data-in, $(d)/org.nixos.nix-daemon.plist, $(prefix)/Library/LaunchDaemons)) $(eval $(call install-data-in, $(d)/org.nixos.nix-daemon.plist, $(prefix)/Library/LaunchDaemons))
@ -19,7 +19,7 @@
<array> <array>
<string>/bin/sh</string> <string>/bin/sh</string>
<string>-c</string> <string>-c</string>
<string>/bin/wait4path /nix/var/nix/profiles/default/bin/nix-daemon &amp;&amp; /nix/var/nix/profiles/default/bin/nix-daemon</string> <string>/bin/wait4path /nix/var/nix/profiles/default/bin/nix-daemon &amp;&amp; exec /nix/var/nix/profiles/default/bin/nix-daemon</string>
</array> </array>
<key>StandardErrorPath</key> <key>StandardErrorPath</key>
<string>/var/log/nix-daemon.log</string> <string>/var/log/nix-daemon.log</string>
@ -1,4 +1,4 @@
ifeq ($(OS), Linux) ifdef HOST_LINUX
$(foreach n, nix-daemon.socket nix-daemon.service, $(eval $(call install-file-in, $(d)/$(n), $(prefix)/lib/systemd/system, 0644))) $(foreach n, nix-daemon.socket nix-daemon.service, $(eval $(call install-file-in, $(d)/$(n), $(prefix)/lib/systemd/system, 0644)))

ifeq ($(OS), Linux) ifdef HOST_LINUX
$(foreach n, nix-daemon.conf, $(eval $(call install-file-in, $(d)/$(n), $(sysconfdir)/init, 0644))) $(foreach n, nix-daemon.conf, $(eval $(call install-file-in, $(d)/$(n), $(sysconfdir)/init, 0644)))

#compdef nix
function _nix() { function _nix() {
local ifs_bk="$IFS" local ifs_bk="$IFS"
local input=("${(Q)words[@]}") local input=("${(Q)words[@]}")
@ -18,4 +20,4 @@ function _nix() {
_describe 'nix' suggestions _describe 'nix' suggestions
} }
compdef _nix nix _nix "$@"
misc/zsh/local.mk Normal file
@ -0,0 +1 @@
$(eval $(call install-file-as, $(d)/completion.zsh, $(datarootdir)/zsh/site-functions/_nix, 0644))
@ -10,8 +10,25 @@ bin-scripts :=
noinst-scripts := noinst-scripts :=
man-pages := man-pages :=
install-tests := install-tests :=
OS = $(shell uname -s)
ifdef HOST_OS
HOST_KERNEL = $(firstword $(subst -, ,$(HOST_OS)))
ifeq ($(HOST_KERNEL), cygwin)
HOST_CYGWIN = 1
endif
ifeq ($(patsubst darwin%,,$(HOST_KERNEL)),)
HOST_DARWIN = 1
endif
ifeq ($(patsubst freebsd%,,$(HOST_KERNEL)),)
HOST_FREEBSD = 1
endif
ifeq ($(HOST_KERNEL), linux)
HOST_LINUX = 1
endif
ifeq ($(patsubst solaris%,,$(HOST_KERNEL)),)
HOST_SOLARIS = 1
endif
endif
# Hack to define a literal space. # Hack to define a literal space.
space := space :=
@ -50,16 +67,16 @@ endif
BUILD_SHARED_LIBS ?= 1 BUILD_SHARED_LIBS ?= 1
ifeq ($(BUILD_SHARED_LIBS), 1) ifeq ($(BUILD_SHARED_LIBS), 1)
ifeq (CYGWIN,$(findstring CYGWIN,$(OS))) ifdef HOST_CYGWIN
GLOBAL_CFLAGS += -U__STRICT_ANSI__ -D_GNU_SOURCE GLOBAL_CFLAGS += -U__STRICT_ANSI__ -D_GNU_SOURCE
GLOBAL_CXXFLAGS += -U__STRICT_ANSI__ -D_GNU_SOURCE GLOBAL_CXXFLAGS += -U__STRICT_ANSI__ -D_GNU_SOURCE
else else
GLOBAL_CFLAGS += -fPIC GLOBAL_CFLAGS += -fPIC
GLOBAL_CXXFLAGS += -fPIC GLOBAL_CXXFLAGS += -fPIC
endif endif
ifneq ($(OS), Darwin) ifndef HOST_DARWIN
ifneq ($(OS), SunOS) ifndef HOST_SOLARIS
ifneq ($(OS), FreeBSD) ifndef HOST_FREEBSD
GLOBAL_LDFLAGS += -Wl,--no-copy-dt-needed-entries GLOBAL_LDFLAGS += -Wl,--no-copy-dt-needed-entries
endif endif
endif endif

libs-list := libs-list :=
ifeq ($(OS), Darwin) ifdef HOST_DARWIN
SO_EXT = dylib SO_EXT = dylib
else else
ifeq (CYGWIN,$(findstring CYGWIN,$(OS))) ifdef HOST_CYGWIN
SO_EXT = dll SO_EXT = dll
else else
SO_EXT = so SO_EXT = so
@ -59,7 +59,7 @@ define build-library
$(1)_OBJS := $$(addprefix $(buildprefix), $$(addsuffix .o, $$(basename $$(_srcs)))) $(1)_OBJS := $$(addprefix $(buildprefix), $$(addsuffix .o, $$(basename $$(_srcs))))
_libs := $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_PATH)) _libs := $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_PATH))
ifeq (CYGWIN,$(findstring CYGWIN,$(OS))) ifdef HOST_CYGWIN
$(1)_INSTALL_DIR ?= $$(bindir) $(1)_INSTALL_DIR ?= $$(bindir)
else else
$(1)_INSTALL_DIR ?= $$(libdir) $(1)_INSTALL_DIR ?= $$(libdir)
@ -73,27 +73,27 @@ define build-library
ifeq ($(BUILD_SHARED_LIBS), 1) ifeq ($(BUILD_SHARED_LIBS), 1)
ifdef $(1)_ALLOW_UNDEFINED ifdef $(1)_ALLOW_UNDEFINED
ifeq ($(OS), Darwin) ifdef HOST_DARWIN
$(1)_LDFLAGS += -undefined suppress -flat_namespace $(1)_LDFLAGS += -undefined suppress -flat_namespace
endif endif
else else
ifneq ($(OS), Darwin) ifndef HOST_DARWIN
ifneq (CYGWIN,$(findstring CYGWIN,$(OS))) ifndef HOST_CYGWIN
$(1)_LDFLAGS += -Wl,-z,defs $(1)_LDFLAGS += -Wl,-z,defs
endif endif
endif endif
endif endif
ifneq ($(OS), Darwin) ifndef HOST_DARWIN
$(1)_LDFLAGS += -Wl,-soname=$$($(1)_NAME).$(SO_EXT) $(1)_LDFLAGS += -Wl,-soname=$$($(1)_NAME).$(SO_EXT)
endif endif
$(1)_PATH := $$(_d)/$$($(1)_NAME).$(SO_EXT) $(1)_PATH := $$(_d)/$$($(1)_NAME).$(SO_EXT)
$$($(1)_PATH): $$($(1)_OBJS) $$(_libs) | $$(_d)/ $$($(1)_PATH): $$($(1)_OBJS) $$(_libs) | $$(_d)/
$$(trace-ld) $(CXX) -o $$(abspath $$@) -shared $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$($(1)_LDFLAGS_PROPAGATED) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE)) $$($(1)_LDFLAGS_UNINSTALLED) $$(trace-ld) $(CXX) -o $$(abspath $$@) -shared $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE)) $$($(1)_LDFLAGS_UNINSTALLED) $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$($(1)_LDFLAGS_PROPAGATED)
ifneq ($(OS), Darwin) ifndef HOST_DARWIN
$(1)_LDFLAGS_USE += -Wl,-rpath,$$(abspath $$(_d)) $(1)_LDFLAGS_USE += -Wl,-rpath,$$(abspath $$(_d))
endif endif
$(1)_LDFLAGS_USE += -L$$(_d) -l$$(patsubst lib%,%,$$(strip $$($(1)_NAME))) $(1)_LDFLAGS_USE += -L$$(_d) -l$$(patsubst lib%,%,$$(strip $$($(1)_NAME)))
@ -105,10 +105,10 @@ define build-library
$$(eval $$(call create-dir, $$($(1)_INSTALL_DIR))) $$(eval $$(call create-dir, $$($(1)_INSTALL_DIR)))
$$($(1)_INSTALL_PATH): $$($(1)_OBJS) $$(_libs_final) | $(DESTDIR)$$($(1)_INSTALL_DIR)/ $$($(1)_INSTALL_PATH): $$($(1)_OBJS) $$(_libs_final) | $(DESTDIR)$$($(1)_INSTALL_DIR)/
$$(trace-ld) $(CXX) -o $$@ -shared $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$($(1)_LDFLAGS_PROPAGATED) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE_INSTALLED)) $$(trace-ld) $(CXX) -o $$@ -shared $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE_INSTALLED)) $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$($(1)_LDFLAGS_PROPAGATED)
$(1)_LDFLAGS_USE_INSTALLED += -L$$(DESTDIR)$$($(1)_INSTALL_DIR) -l$$(patsubst lib%,%,$$(strip $$($(1)_NAME))) $(1)_LDFLAGS_USE_INSTALLED += -L$$(DESTDIR)$$($(1)_INSTALL_DIR) -l$$(patsubst lib%,%,$$(strip $$($(1)_NAME)))
ifneq ($(OS), Darwin) ifndef HOST_DARWIN
ifeq ($(SET_RPATH_TO_LIBS), 1) ifeq ($(SET_RPATH_TO_LIBS), 1)
$(1)_LDFLAGS_USE_INSTALLED += -Wl,-rpath,$$($(1)_INSTALL_DIR) $(1)_LDFLAGS_USE_INSTALLED += -Wl,-rpath,$$($(1)_INSTALL_DIR)
else else

$$(eval $$(call create-dir, $$(_d))) $$(eval $$(call create-dir, $$(_d)))
$$($(1)_PATH): $$($(1)_OBJS) $$(_libs) | $$(_d)/ $$($(1)_PATH): $$($(1)_OBJS) $$(_libs) | $$(_d)/
$$(trace-ld) $(CXX) -o $$@ $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE)) $$(trace-ld) $(CXX) -o $$@ $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE)) $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS)
$(1)_INSTALL_DIR ?= $$(bindir) $(1)_INSTALL_DIR ?= $$(bindir)
@ -49,7 +49,7 @@ define build-program
_libs_final := $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_INSTALL_PATH)) _libs_final := $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_INSTALL_PATH))
$(DESTDIR)$$($(1)_INSTALL_PATH): $$($(1)_OBJS) $$(_libs_final) | $(DESTDIR)$$($(1)_INSTALL_DIR)/ $(DESTDIR)$$($(1)_INSTALL_PATH): $$($(1)_OBJS) $$(_libs_final) | $(DESTDIR)$$($(1)_INSTALL_DIR)/
$$(trace-ld) $(CXX) -o $$@ $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE_INSTALLED)) $$(trace-ld) $(CXX) -o $$@ $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE_INSTALLED)) $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS)
else else
@ -8,10 +8,15 @@ endif
libnixrust_PATH := $(d)/target/$(RUST_DIR)/libnixrust.$(SO_EXT) libnixrust_PATH := $(d)/target/$(RUST_DIR)/libnixrust.$(SO_EXT)
libnixrust_INSTALL_PATH := $(libdir)/libnixrust.$(SO_EXT) libnixrust_INSTALL_PATH := $(libdir)/libnixrust.$(SO_EXT)
libnixrust_LDFLAGS_USE := -L$(d)/target/$(RUST_DIR) -lnixrust -ldl libnixrust_LDFLAGS_USE := -L$(d)/target/$(RUST_DIR) -lnixrust
libnixrust_LDFLAGS_USE_INSTALLED := -L$(libdir) -lnixrust -ldl libnixrust_LDFLAGS_USE_INSTALLED := -L$(libdir) -lnixrust
ifeq ($(OS), Darwin) ifdef HOST_LINUX
libnixrust_LDFLAGS_USE += -ldl
libnixrust_LDFLAGS_USE_INSTALLED += -ldl
endif
ifdef HOST_DARWIN
libnixrust_BUILD_FLAGS = NIX_LDFLAGS="-undefined dynamic_lookup" libnixrust_BUILD_FLAGS = NIX_LDFLAGS="-undefined dynamic_lookup"
else else
libnixrust_LDFLAGS_USE += -Wl,-rpath,$(abspath $(d)/target/$(RUST_DIR)) libnixrust_LDFLAGS_USE += -Wl,-rpath,$(abspath $(d)/target/$(RUST_DIR))
@ -26,7 +31,7 @@ $(libnixrust_PATH): $(call rwildcard, $(d)/src, *.rs) $(d)/Cargo.toml
$(libnixrust_INSTALL_PATH): $(libnixrust_PATH) $(libnixrust_INSTALL_PATH): $(libnixrust_PATH)
$(target-gen) cp $^ $@ $(target-gen) cp $^ $@
ifeq ($(OS), Darwin) ifdef HOST_DARWIN
install_name_tool -id $@ $@ install_name_tool -id $@ $@
endif endif
@ -35,7 +40,7 @@ clean: clean-rust
clean-rust: clean-rust:
$(suppress) rm -rfv nix-rust/target $(suppress) rm -rfv nix-rust/target
ifneq ($(OS), Darwin) ifndef HOST_DARWIN
check: rust-tests check: rust-tests
rust-tests: rust-tests:
@ -1,6 +1,6 @@
makefiles = local.mk makefiles = local.mk
GLOBAL_CXXFLAGS += -g -Wall -std=c++17 GLOBAL_CXXFLAGS += -g -Wall -std=c++17 -I ../src
-include Makefile.config -include Makefile.config
@ -1,3 +1,4 @@
HOST_OS = @host_os@
CC = @CC@ CC = @CC@
CFLAGS = @CFLAGS@ CFLAGS = @CFLAGS@
CXX = @CXX@ CXX = @CXX@
@ -7,6 +7,8 @@ CXXFLAGS=
AC_PROG_CC AC_PROG_CC
AC_PROG_CXX AC_PROG_CXX
AC_CANONICAL_HOST
# Use 64-bit file system calls so that we can support files > 2 GiB. # Use 64-bit file system calls so that we can support files > 2 GiB.
AC_SYS_LARGEFILE AC_SYS_LARGEFILE
@ -22,6 +22,7 @@ our @EXPORT = qw(
derivationFromPath derivationFromPath
addTempRoot addTempRoot
getBinDir getStoreDir getBinDir getStoreDir
queryRawRealisation
); );
our $VERSION = '0.15'; our $VERSION = '0.15';
@ -15,6 +15,7 @@
#include "crypto.hh" #include "crypto.hh"
#include <sodium.h> #include <sodium.h>
#include <nlohmann/json.hpp>
using namespace nix; using namespace nix;
@ -120,6 +121,18 @@ SV * queryPathInfo(char * path, int base32)
croak("%s", e.what()); croak("%s", e.what());
} }
SV * queryRawRealisation(char * outputId)
PPCODE:
try {
auto realisation = store()->queryRealisation(DrvOutput::parse(outputId));
if (realisation)
XPUSHs(sv_2mortal(newSVpv(realisation->toJSON().dump().c_str(), 0)));
else
XPUSHs(sv_2mortal(newSVpv("", 0)));
} catch (Error & e) {
croak("%s", e.what());
}
SV * queryPathFromHashPart(char * hashPart) SV * queryPathFromHashPart(char * hashPart)
PPCODE: PPCODE:
@ -28,7 +28,7 @@ Store_CXXFLAGS = \
Store_LDFLAGS := $(SODIUM_LIBS) $(NIX_LIBS) Store_LDFLAGS := $(SODIUM_LIBS) $(NIX_LIBS)
ifeq (CYGWIN,$(findstring CYGWIN,$(OS))) ifdef HOST_CYGWIN
archlib = $(shell perl -E 'use Config; print $$Config{archlib};') archlib = $(shell perl -E 'use Config; print $$Config{archlib};')
libperl = $(shell perl -E 'use Config; print $$Config{libperl};') libperl = $(shell perl -E 'use Config; print $$Config{libperl};')
Store_LDFLAGS += $(shell find ${archlib} -name ${libperl}) Store_LDFLAGS += $(shell find ${archlib} -name ${libperl})
@ -1,33 +1,262 @@
#!/bin/sh #!/usr/bin/env bash
set -e set -eu
set -o pipefail
root_disk() { # I'm a little agnostic on the choices, but supporting a wide
diskutil info -plist / # slate of uses for now, including:
} # - import-only: `. create-darwin-volume.sh no-main[ ...]`
# - legacy: `./create-darwin-volume.sh` or `. create-darwin-volume.sh`
# (both will run main())
# - external alt-routine: `./create-darwin-volume.sh no-main func[ ...]`
if [ "${1-}" = "no-main" ]; then
shift
readonly _CREATE_VOLUME_NO_MAIN=1
else
readonly _CREATE_VOLUME_NO_MAIN=0
# declare some things we expect to inherit from install-multi-user
# I don't love this (because it's a bit of a kludge).
#
# CAUTION: (Dec 19 2020)
# This is a stopgap. It doesn't cover the full slate of
# identifiers we inherit--just those necessary to:
# - avoid breaking direct invocations of this script (here/now)
# - avoid hard-to-reverse structural changes before the call to rm
# single-user support is verified
#
# In the near-mid term, I (personally) think we should:
# - decide to deprecate the direct call and add a notice
# - fold all of this into install-darwin-multi-user.sh
# - intentionally remove the old direct-invocation form (kill the
# routine, replace this script w/ deprecation notice and a note
# on the remove-after date)
#
readonly NIX_ROOT="${NIX_ROOT:-/nix}"
# i.e., "disk1" _sudo() {
shift # throw away the 'explanation'
/usr/bin/sudo "$@"
}
failure() {
if [ "$*" = "" ]; then
cat
else
echo "$@"
fi
exit 1
}
task() {
echo "$@"
}
fi
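For orientation, here is roughly how the import-only mode above is meant to be used; install-darwin-multi-user.sh, later in this same change, does exactly this (in no-main mode, helpers like _sudo and failure are expected to come from the caller):

    # source for its functions only; main() is not run
    . "$EXTRACTED_NIX_PATH/create-darwin-volume.sh" "no-main"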
# usually "disk1"
root_disk_identifier() { root_disk_identifier() {
diskutil info -plist / | xmllint --xpath "/plist/dict/key[text()='ParentWholeDisk']/following-sibling::string[1]/text()" - # For performance (~10ms vs 280ms) I'm parsing 'diskX' from stat output
# (~diskXsY)--but I'm retaining the more-semantic approach since
# it documents intent better.
# /usr/sbin/diskutil info -plist / | xmllint --xpath "/plist/dict/key[text()='ParentWholeDisk']/following-sibling::string[1]/text()" -
#
local special_device
special_device="$(/usr/bin/stat -f "%Sd" /)"
echo "${special_device%s[0-9]*}"
} }
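As a concrete illustration of the fast path above (device names are examples and will differ per machine):

    # on a typical Catalina-era install:
    /usr/bin/stat -f "%Sd" /           # prints e.g. disk1s5
    special_device="disk1s5"
    echo "${special_device%s[0-9]*}"   # strips the slice suffix -> disk1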
find_nix_volume() { # make it easy to play w/ 'Case-sensitive APFS'
diskutil apfs list -plist "$1" | xmllint --xpath "(/plist/dict/array/dict/key[text()='Volumes']/following-sibling::array/dict/key[text()='Name']/following-sibling::string[starts-with(translate(text(),'N','n'),'nix')]/text())[1]" - 2>/dev/null || true readonly NIX_VOLUME_FS="${NIX_VOLUME_FS:-APFS}"
readonly NIX_VOLUME_LABEL="${NIX_VOLUME_LABEL:-Nix Store}"
# Strongly assuming we'll make a volume on the device / is on
# But you can override NIX_VOLUME_USE_DISK to create it on some other device
readonly NIX_VOLUME_USE_DISK="${NIX_VOLUME_USE_DISK:-$(root_disk_identifier)}"
NIX_VOLUME_USE_SPECIAL="${NIX_VOLUME_USE_SPECIAL:-}"
NIX_VOLUME_USE_UUID="${NIX_VOLUME_USE_UUID:-}"
readonly NIX_VOLUME_MOUNTD_DEST="${NIX_VOLUME_MOUNTD_DEST:-/Library/LaunchDaemons/org.nixos.darwin-store.plist}"
if /usr/bin/fdesetup isactive >/dev/null; then
test_filevault_in_use() { return 0; }
# no readonly; we may modify if user refuses from cure_volume
NIX_VOLUME_DO_ENCRYPT="${NIX_VOLUME_DO_ENCRYPT:-1}"
else
test_filevault_in_use() { return 1; }
NIX_VOLUME_DO_ENCRYPT="${NIX_VOLUME_DO_ENCRYPT:-0}"
fi
should_encrypt_volume() {
test_filevault_in_use && (( NIX_VOLUME_DO_ENCRYPT == 1 ))
}
substep() {
printf " %s\n" "" "- $1" "" "${@:2}"
}
volumes_labeled() {
local label="$1"
xsltproc --novalid --stringparam label "$label" - <(/usr/sbin/ioreg -ra -c "AppleAPFSVolume") <<'EOF'
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="text"/>
<xsl:template match="/">
<xsl:apply-templates select="/plist/array/dict/key[text()='IORegistryEntryName']/following-sibling::*[1][text()=$label]/.."/>
</xsl:template>
<xsl:template match="dict">
<xsl:apply-templates match="string" select="key[text()='BSD Name']/following-sibling::*[1]"/>
<xsl:text>=</xsl:text>
<xsl:apply-templates match="string" select="key[text()='UUID']/following-sibling::*[1]"/>
<xsl:text>&#xA;</xsl:text>
</xsl:template>
</xsl:stylesheet>
EOF
# I cut label out of the extracted values, but here it is for reference:
# <xsl:apply-templates match="string" select="key[text()='IORegistryEntryName']/following-sibling::*[1]"/>
# <xsl:text>=</xsl:text>
}
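Each output line pairs a BSD name with a volume UUID, split on '=' by the caller. A minimal sketch of consuming one line (the values below are made up):

    # illustrative values only; real input comes from volumes_labeled "Nix Store"
    while IFS== read -r special uuid; do
        echo "special=$special uuid=$uuid"
    done <<< "disk1s7=11111111-2222-3333-4444-555555555555"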
right_disk() {
local volume_special="$1" # (i.e., disk1s7)
[[ "$volume_special" == "$NIX_VOLUME_USE_DISK"s* ]]
}
right_volume() {
local volume_special="$1" # (i.e., disk1s7)
# if set, it must match; otherwise ensure it's on the right disk
if [ -z "$NIX_VOLUME_USE_SPECIAL" ]; then
if right_disk "$volume_special"; then
NIX_VOLUME_USE_SPECIAL="$volume_special" # latch on
return 0
else
return 1
fi
else
[ "$volume_special" = "$NIX_VOLUME_USE_SPECIAL" ]
fi
}
right_uuid() {
local volume_uuid="$1"
# if set, it must match; otherwise allow
if [ -z "$NIX_VOLUME_USE_UUID" ]; then
NIX_VOLUME_USE_UUID="$volume_uuid" # latch on
return 0
else
[ "$volume_uuid" = "$NIX_VOLUME_USE_UUID" ]
fi
}
cure_volumes() {
local found volume special uuid
# loop just in case they have more than one volume
# (nothing stops you from doing this)
for volume in $(volumes_labeled "$NIX_VOLUME_LABEL"); do
# CAUTION: this could (maybe) be a more normal read
# loop like:
# while IFS== read -r special uuid; do
# # ...
# done <<<"$(volumes_labeled "$NIX_VOLUME_LABEL")"
#
# I did it with for to skirt a problem with the obvious
# pattern replacing stdin and causing user prompts
# inside (which also use read and access stdin) to skip
#
# If there's an existing encrypted volume we can't find
# in keychain, the user never gets prompted to delete
# the volume, and the install fails.
#
# If you change this, a human needs to test a very
# specific scenario: you already have an encrypted
# Nix Store volume, and have deleted its credential
# from keychain. Ensure the script asks you if it can
# delete the volume, and then prompts for your sudo
# password to confirm.
#
# shellcheck disable=SC1097
IFS== read -r special uuid <<< "$volume"
# take the first one that's on the right disk
if [ -z "${found:-}" ]; then
if right_volume "$special" && right_uuid "$uuid"; then
cure_volume "$special" "$uuid"
found="${special} (${uuid})"
else
warning <<EOF
Ignoring ${special} (${uuid}) because I am looking for:
disk=${NIX_VOLUME_USE_DISK} special=${NIX_VOLUME_USE_SPECIAL:-${NIX_VOLUME_USE_DISK}sX} uuid=${NIX_VOLUME_USE_UUID:-any}
EOF
# TODO: give chance to delete if ! headless?
fi
else
warning <<EOF
Ignoring ${special} (${uuid}), already found target: $found
EOF
# TODO reminder? I feel like I want one
# idiom that reminds some warnings, or warns
# some reminders?
# TODO: if ! headless, chance to delete?
fi
done
if [ -z "${found:-}" ]; then
readonly NIX_VOLUME_USE_SPECIAL NIX_VOLUME_USE_UUID
fi
}
volume_encrypted() {
local volume_special="$1" # (i.e., disk1s7)
# Trying to match the first line of output; known first lines:
# No cryptographic users for <special>
# Cryptographic user for <special> (1 found)
# Cryptographic users for <special> (2 found)
/usr/sbin/diskutil apfs listCryptoUsers -plist "$volume_special" | /usr/bin/grep -q APFSCryptoUserUUID
} }
test_fstab() { test_fstab() {
grep -q "/nix apfs rw" /etc/fstab 2>/dev/null /usr/bin/grep -q "$NIX_ROOT apfs rw" /etc/fstab 2>/dev/null
} }
test_nix_symlink() { test_nix_root_is_symlink() {
[ -L "/nix" ] || grep -q "^nix." /etc/synthetic.conf 2>/dev/null [ -L "$NIX_ROOT" ]
} }
test_synthetic_conf() { test_synthetic_conf_either(){
grep -q "^nix$" /etc/synthetic.conf 2>/dev/null /usr/bin/grep -qE "^${NIX_ROOT:1}($|\t.{3,}$)" /etc/synthetic.conf 2>/dev/null
}
test_synthetic_conf_mountable() {
/usr/bin/grep -q "^${NIX_ROOT:1}$" /etc/synthetic.conf 2>/dev/null
}
test_synthetic_conf_symlinked() {
/usr/bin/grep -qE "^${NIX_ROOT:1}\t.{3,}$" /etc/synthetic.conf 2>/dev/null
}
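For reference, the two line shapes in /etc/synthetic.conf that these greps tell apart, assuming NIX_ROOT=/nix (the symlink target below is only an example):

    # mountable form: a bare name; macOS creates an empty /nix mount point
    #   nix
    # symlinked form: name, a tab, a target path; macOS creates /nix -> target
    #   nix<TAB>/System/Volumes/Data/nix
    printf 'nix\n' | /usr/bin/grep -q "^nix$" && echo "matches the mountable form"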
test_nix_volume_mountd_installed() {
test -e "$NIX_VOLUME_MOUNTD_DEST"
}
# current volume password
test_keychain_by_uuid() {
local volume_uuid="$1"
# Note: doesn't need sudo just to check; doesn't output pw
security find-generic-password -s "$volume_uuid" &>/dev/null
}
get_volume_pass() {
local volume_uuid="$1"
_sudo \
"to confirm keychain has a password that unlocks this volume" \
security find-generic-password -s "$volume_uuid" -w
}
verify_volume_pass() {
local volume_special="$1" # (i.e., disk1s7)
local volume_uuid="$2"
/usr/sbin/diskutil apfs unlockVolume "$volume_special" -verify -stdinpassphrase -user "$volume_uuid"
}
volume_pass_works() {
local volume_special="$1" # (i.e., disk1s7)
local volume_uuid="$2"
get_volume_pass "$volume_uuid" | verify_volume_pass "$volume_special" "$volume_uuid"
} }
# Create the paths defined in synthetic.conf, saving us a reboot. # Create the paths defined in synthetic.conf, saving us a reboot.
create_synthetic_objects(){ create_synthetic_objects() {
# Big Sur takes away the -B flag we were using and replaces it # Big Sur takes away the -B flag we were using and replaces it
# with a -t flag that appears to do the same thing (but they # with a -t flag that appears to do the same thing (but they
# don't behave exactly the same way in terms of return values). # don't behave exactly the same way in terms of return values).
@ -41,129 +270,575 @@ create_synthetic_objects(){
} }
test_nix() { test_nix() {
test -d "/nix" test -d "$NIX_ROOT"
} }
test_t2_chip_present(){ test_voldaemon() {
# Use xartutil to see if system has a t2 chip. test -f "$NIX_VOLUME_MOUNTD_DEST"
#
# This isn't well-documented on its own; until it is,
# let's keep track of knowledge/assumptions.
#
# Warnings:
# - Don't search "xart" if porn will cause you trouble :)
# - Other xartutil flags do dangerous things. Don't run them
# naively. If you must, search "xartutil" first.
#
# Assumptions:
# - the "xART session seeds recovery utility"
# appears to interact with xartstorageremoted
# - `sudo xartutil --list` lists xART sessions
# and their seeds and exits 0 if successful. If
# not, it exits 1 and prints an error such as:
# xartutil: ERROR: No supported link to the SEP present
# - xART sessions/seeds are present when a T2 chip is
# (and not, otherwise)
# - the presence of a T2 chip means a newly-created
# volume on the primary drive will be
# encrypted at rest
# - all together: `sudo xartutil --list`
# should exit 0 if a new Nix Store volume will
# be encrypted at rest, and exit 1 if not.
sudo xartutil --list >/dev/null 2>/dev/null
} }
test_filevault_in_use() { generate_mount_command() {
fdesetup isactive >/dev/null local cmd_type="$1" # encrypted|unencrypted
local volume_uuid mountpoint cmd=()
printf -v volume_uuid "%q" "$2"
printf -v mountpoint "%q" "$NIX_ROOT"
case "$cmd_type" in
encrypted)
cmd=(/bin/sh -c "/usr/bin/security find-generic-password -s '$volume_uuid' -w | /usr/sbin/diskutil apfs unlockVolume '$volume_uuid' -mountpoint '$mountpoint' -stdinpassphrase");;
unencrypted)
cmd=(/usr/sbin/diskutil mount -mountPoint "$mountpoint" "$volume_uuid");;
*)
failure "Invalid first arg $cmd_type to generate_mount_command";;
esac
printf " <string>%s</string>\n" "${cmd[@]}"
} }
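To make the plist assembly below easier to follow: printf repeats its format once per array element, so the unencrypted branch expands to one <string> element per word (the UUID here is a placeholder):

    cmd=(/usr/sbin/diskutil mount -mountPoint /nix 11111111-2222-3333-4444-555555555555)
    printf "    <string>%s</string>\n" "${cmd[@]}"
    # -> five <string> lines: the diskutil path, mount, -mountPoint, /nix, and the UUID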
# use after error msg for conditions we don't understand generate_mount_daemon() {
suggest_report_error(){ local cmd_type="$1" # encrypted|unencrypted
# ex "error: something sad happened :(" >&2 local volume_uuid="$2"
echo " please report this @ https://github.com/nixos/nix/issues" >&2 cat <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>RunAtLoad</key>
<true/>
<key>Label</key>
<string>org.nixos.darwin-store</string>
<key>ProgramArguments</key>
<array>
$(generate_mount_command "$cmd_type" "$volume_uuid")
</array>
</dict>
</plist>
EOF
} }
main() { _eat_bootout_err() {
( /usr/bin/grep -v "Boot-out failed: 36: Operation now in progress"
echo "" }
echo " ------------------------------------------------------------------ "
echo " | This installer will create a volume for the nix store and |"
echo " | configure it to mount at /nix. Follow these steps to uninstall. |"
echo " ------------------------------------------------------------------ "
echo ""
echo " 1. Remove the entry from fstab using 'sudo vifs'"
echo " 2. Destroy the data volume using 'diskutil apfs deleteVolume'"
echo " 3. Remove the 'nix' line from /etc/synthetic.conf or the file"
echo ""
) >&2
if test_nix_symlink; then # TODO: remove with --uninstall?
echo "error: /nix is a symlink, please remove it and make sure it's not in synthetic.conf (in which case a reboot is required)" >&2 uninstall_launch_daemon_directions() {
echo " /nix -> $(readlink "/nix")" >&2 local daemon_label="$1" # i.e., org.nixos.blah-blah
exit 2 local daemon_plist="$2" # abspath
substep "Uninstall LaunchDaemon $daemon_label" \
" sudo launchctl bootout system/$daemon_label" \
" sudo rm $daemon_plist"
}
uninstall_launch_daemon_prompt() {
local daemon_label="$1" # i.e., org.nixos.blah-blah
local daemon_plist="$2" # abspath
local reason_for_daemon="$3"
cat <<EOF
The installer adds a LaunchDaemon to $reason_for_daemon: $daemon_label
EOF
if ui_confirm "Can I remove it?"; then
_sudo "to terminate the daemon" \
launchctl bootout "system/$daemon_label" 2> >(_eat_bootout_err >&2) || true
# this can "fail" with a message like:
# Boot-out failed: 36: Operation now in progress
_sudo "to remove the daemon definition" rm "$daemon_plist"
fi
}
nix_volume_mountd_uninstall_directions() {
uninstall_launch_daemon_directions "org.nixos.darwin-store" \
"$NIX_VOLUME_MOUNTD_DEST"
}
nix_volume_mountd_uninstall_prompt() {
uninstall_launch_daemon_prompt "org.nixos.darwin-store" \
"$NIX_VOLUME_MOUNTD_DEST" \
"mount your Nix volume"
}
# TODO: move nix_daemon to install-darwin-multi-user if/when uninstall_launch_daemon_prompt moves up to install-multi-user
nix_daemon_uninstall_prompt() {
uninstall_launch_daemon_prompt "org.nixos.nix-daemon" \
"$NIX_DAEMON_DEST" \
"run the nix-daemon"
}
# TODO: remove with --uninstall?
nix_daemon_uninstall_directions() {
uninstall_launch_daemon_directions "org.nixos.nix-daemon" \
"$NIX_DAEMON_DEST"
}
# TODO: remove with --uninstall?
synthetic_conf_uninstall_directions() {
# :1 to strip leading slash
substep "Remove ${NIX_ROOT:1} from /etc/synthetic.conf" \
" If nix is the only entry: sudo rm /etc/synthetic.conf" \
" Otherwise: sudo /usr/bin/sed -i '' -e '/^${NIX_ROOT:1}$/d' /etc/synthetic.conf"
}
synthetic_conf_uninstall_prompt() {
cat <<EOF
During install, I add '${NIX_ROOT:1}' to /etc/synthetic.conf, which instructs
macOS to create an empty root directory for mounting the Nix volume.
EOF
# make the edit to a copy
/usr/bin/grep -vE "^${NIX_ROOT:1}($|\t.{3,}$)" /etc/synthetic.conf > "$SCRATCH/synthetic.conf.edit"
if test_synthetic_conf_symlinked; then
warning <<EOF
/etc/synthetic.conf already contains a line instructing your system
to make '${NIX_ROOT}' as a symlink:
$(/usr/bin/grep -nE "^${NIX_ROOT:1}\t.{3,}$" /etc/synthetic.conf)
This may mean your system has/had a non-standard Nix install.
The volume-creation process in this installer is *not* compatible
with a symlinked store, so I'll have to remove this instruction to
continue.
If you want/need to keep this instruction, answer 'n' to abort.
EOF
fi fi
if ! test_synthetic_conf; then # ask to rm if this left the file empty aside from comments, else edit
echo "Configuring /etc/synthetic.conf..." >&2 if /usr/bin/diff -q <(:) <(/usr/bin/grep -v "^#" "$SCRATCH/synthetic.conf.edit") &>/dev/null; then
echo nix | sudo tee -a /etc/synthetic.conf if confirm_rm "/etc/synthetic.conf"; then
if ! test_synthetic_conf; then if test_nix_root_is_symlink; then
echo "error: failed to configure synthetic.conf;" >&2 failure >&2 <<EOF
suggest_report_error I removed /etc/synthetic.conf, but $NIX_ROOT is already a symlink
exit 1 (-> $(readlink "$NIX_ROOT")). The system should remove it when you reboot.
Once you've rebooted, run the installer again.
EOF
fi
return 0
fi
else
if confirm_edit "$SCRATCH/synthetic.conf.edit" "/etc/synthetic.conf"; then
if test_nix_root_is_symlink; then
failure >&2 <<EOF
I edited Nix out of /etc/synthetic.conf, but $NIX_ROOT is already a symlink
(-> $(readlink "$NIX_ROOT")). The system should remove it when you reboot.
Once you've rebooted, run the installer again.
EOF
fi
return 0
fi fi
fi fi
# fallback instructions
echo "Manually remove nix from /etc/synthetic.conf"
return 1
}
if ! test_nix; then add_nix_vol_fstab_line() {
echo "Creating mountpoint for /nix..." >&2 local uuid="$1"
create_synthetic_objects # the ones we defined in synthetic.conf # shellcheck disable=SC1003,SC2026
if ! test_nix; then local escaped_mountpoint="${NIX_ROOT/ /'\\\'040}"
sudo mkdir -p /nix 2>/dev/null || true shift
EDITOR="/usr/bin/ex" _sudo "to add nix to fstab" "$@" <<EOF
:a
UUID=$uuid $escaped_mountpoint apfs rw,noauto,nobrowse,suid,owners
.
:x
EOF
# TODO: preserving my notes on suid,owners above until resolved
# There *may* be some issue regarding volume ownership, see nix#3156
#
# It seems like the cheapest fix is adding "suid,owners" to fstab, but:
# - We don't have much info on this condition yet
# - I'm not certain if these cause other problems?
# - There's a "chown" component some people claim to need to fix this
# that I don't understand yet
# (Note however that I've had to add a chown step to handle
# single->multi-user reinstalls, which may cover this)
#
# I'm not sure if it's safe to approach this way?
#
# I think the most-proper way to test for it is:
# diskutil info -plist "$NIX_VOLUME_LABEL" | xmllint --xpath "(/plist/dict/key[text()='GlobalPermissionsEnabled'])/following-sibling::*[1][name()='true']" -; echo $?
#
# There's also `sudo /usr/sbin/vsdbutil -c /path` (which is much faster, but is also
# deprecated and needs minor parsing).
#
# If no one finds a problem with doing so, I think the simplest approach
# is to just eagerly set this. I found a few imperative approaches:
# (diskutil enableOwnership, ~100ms), a cheap one (/usr/sbin/vsdbutil -a, ~40-50ms),
# a very cheap one (append the internal format to /var/db/volinfo.database).
#
# But vsdbutil's deprecation notice suggests using fstab, so I want to
# give that a whirl first.
#
# TODO: when this is workable, poke infinisil about reproducing the issue
# and confirming this fix?
}
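Net effect of the ex/vifs dance above: ':a' starts appending, '.' ends the input, ':x' writes and exits, so one line is added to /etc/fstab. With a placeholder UUID it looks like:

    # UUID=11111111-2222-3333-4444-555555555555 /nix apfs rw,noauto,nobrowse,suid,owners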
delete_nix_vol_fstab_line() {
# TODO: I'm scaffolding this to handle the new nix volumes
# but it might be nice to generalize a smidge further to
# go ahead and set up a pattern for curing "old" things
# we no longer do?
EDITOR="/usr/bin/patch" _sudo "to cut nix from fstab" "$@" < <(/usr/bin/diff /etc/fstab <(/usr/bin/grep -v "$NIX_ROOT apfs rw" /etc/fstab))
# leaving some parts out of the grep; people may fiddle this a little?
}
# TODO: hope to remove with --uninstall
fstab_uninstall_directions() {
substep "Remove ${NIX_ROOT} from /etc/fstab" \
" If nix is the only entry: sudo rm /etc/fstab" \
" Otherwise, run 'sudo /usr/sbin/vifs' to remove the nix line"
}
fstab_uninstall_prompt() {
cat <<EOF
During install, I add '${NIX_ROOT}' to /etc/fstab so that macOS knows what
mount options to use for the Nix volume.
EOF
cp /etc/fstab "$SCRATCH/fstab.edit"
# technically doesn't need the _sudo path, but throwing away the
# output is probably better than mostly-duplicating the code...
delete_nix_vol_fstab_line patch "$SCRATCH/fstab.edit" &>/dev/null
# if the patch test edit, minus comment lines, is equal to empty (:)
if /usr/bin/diff -q <(:) <(/usr/bin/grep -v "^#" "$SCRATCH/fstab.edit") &>/dev/null; then
# this edit would leave it empty; propose deleting it
if confirm_rm "/etc/fstab"; then
return 0
else
echo "Remove nix from /etc/fstab (or remove the file)"
fi fi
if ! test_nix; then else
echo "error: failed to bootstrap /nix; if a reboot doesn't help," >&2 echo "I might be able to help you make this edit. Here's the diff:"
suggest_report_error if ! _diff "/etc/fstab" "$SCRATCH/fstab.edit" && ui_confirm "Does the change above look right?"; then
exit 1 delete_nix_vol_fstab_line /usr/sbin/vifs
else
echo "Remove nix from /etc/fstab (or remove the file)"
fi fi
fi fi
}
disk="$(root_disk_identifier)" remove_volume() {
volume=$(find_nix_volume "$disk") local volume_special="$1" # (i.e., disk1s7)
if [ -z "$volume" ]; then _sudo "to unmount the Nix volume" \
echo "Creating a Nix Store volume..." >&2 /usr/sbin/diskutil unmount force "$volume_special" || true # might not be mounted
_sudo "to delete the Nix volume" \
/usr/sbin/diskutil apfs deleteVolume "$volume_special"
}
if test_filevault_in_use; then # aspiration: robust enough to both fix problems
# TODO: Not sure if it's in-scope now, but `diskutil apfs list` # *and* update older darwin volumes
# shows both filevault and encrypted at rest status, and it cure_volume() {
# may be the more semantic way to test for this? It'll show local volume_special="$1" # (i.e., disk1s7)
# `FileVault: No (Encrypted at rest)` local volume_uuid="$2"
# `FileVault: No` header "Found existing Nix volume"
# `FileVault: Yes (Unlocked)` row " special" "$volume_special"
# and so on. row " uuid" "$volume_uuid"
if test_t2_chip_present; then
echo "warning: boot volume is FileVault-encrypted, but the Nix store volume" >&2 if volume_encrypted "$volume_special"; then
echo " is only encrypted at rest." >&2 row "encrypted" "yes"
echo " See https://nixos.org/nix/manual/#sect-macos-installation" >&2 if volume_pass_works "$volume_special" "$volume_uuid"; then
NIX_VOLUME_DO_ENCRYPT=0
ok "Found a working decryption password in keychain :)"
echo ""
else
# - this is a volume we made, and
# - the user encrypted it on their own
# - something deleted the credential
# - this is an old or BYO volume and the pw
# just isn't somewhere we can find it.
#
# We're going to explain why we're freaking out
# and prompt them to either delete the volume
# (requiring a sudo auth), or abort to fix
warning <<EOF
This volume is encrypted, but I don't see a password to decrypt it.
The quick fix is to let me delete this volume and make you a new one.
If that's okay, enter your (sudo) password to continue. If not, you
can ensure the decryption password is in your system keychain with a
"Where" (service) field set to this volume's UUID:
$volume_uuid
EOF
if password_confirm "delete this volume"; then
remove_volume "$volume_special"
else else
echo "error: refusing to create Nix store volume because the boot volume is" >&2 # TODO: this is a good design case for a warn-and
echo " FileVault encrypted, but encryption-at-rest is not available." >&2 # remind idiom...
echo " Manually create a volume for the store and re-run this script." >&2 failure <<EOF
echo " See https://nixos.org/nix/manual/#sect-macos-installation" >&2 Your Nix volume is encrypted, but I couldn't find its password. Either:
exit 1 - Delete or rename the volume out of the way
- Ensure its decryption password is in the system keychain with a
"Where" (service) field set to this volume's UUID:
$volume_uuid
EOF
fi
fi
elif test_filevault_in_use; then
row "encrypted" "no"
warning <<EOF
FileVault is on, but your $NIX_VOLUME_LABEL volume isn't encrypted.
EOF
# if we're interactive, give them a chance to
# encrypt the volume. If not, /shrug
if ! headless && (( NIX_VOLUME_DO_ENCRYPT == 1 )); then
if ui_confirm "Should I encrypt it and add the decryption key to your keychain?"; then
encrypt_volume "$volume_uuid" "$NIX_VOLUME_LABEL"
NIX_VOLUME_DO_ENCRYPT=0
else
NIX_VOLUME_DO_ENCRYPT=0
reminder "FileVault is on, but your $NIX_VOLUME_LABEL volume isn't encrypted."
fi fi
fi fi
sudo diskutil apfs addVolume "$disk" APFS 'Nix Store' -mountpoint /nix
volume="Nix Store"
else else
echo "Using existing '$volume' volume" >&2 row "encrypted" "no"
fi
if ! test_fstab; then
echo "Configuring /etc/fstab..." >&2
label=$(echo "$volume" | sed 's/ /\\040/g')
# shellcheck disable=SC2209
printf "\$a\nLABEL=%s /nix apfs rw,nobrowse\n.\nwq\n" "$label" | EDITOR=ed sudo vifs
fi fi
} }
main "$@" remove_volume_artifacts() {
if test_synthetic_conf_either; then
# NIX_ROOT is in synthetic.conf
if synthetic_conf_uninstall_prompt; then
# TODO: moot until we tackle uninstall, but when we're
# actually uninstalling, we should issue:
# reminder "macOS will clean up the empty mount-point directory at $NIX_ROOT on reboot."
:
fi
fi
if test_fstab; then
fstab_uninstall_prompt
fi
if test_nix_volume_mountd_installed; then
nix_volume_mountd_uninstall_prompt
fi
}
setup_synthetic_conf() {
if test_nix_root_is_symlink; then
if ! test_synthetic_conf_symlinked; then
failure >&2 <<EOF
error: $NIX_ROOT is a symlink (-> $(readlink "$NIX_ROOT")).
Please remove it. If nix is in /etc/synthetic.conf, remove it and reboot.
EOF
fi
fi
if ! test_synthetic_conf_mountable; then
task "Configuring /etc/synthetic.conf to make a mount-point at $NIX_ROOT" >&2
# technically /etc/synthetic.d/nix is supported in Big Sur+
# but handling both takes even more code...
_sudo "to add Nix to /etc/synthetic.conf" \
/usr/bin/ex /etc/synthetic.conf <<EOF
:a
${NIX_ROOT:1}
.
:x
EOF
if ! test_synthetic_conf_mountable; then
failure "error: failed to configure synthetic.conf" >&2
fi
create_synthetic_objects
if ! test_nix; then
failure >&2 <<EOF
error: failed to bootstrap $NIX_ROOT
If you enabled FileVault after booting, this is likely a known issue
with macOS that you'll have to reboot to fix. If you didn't enable FV,
though, please open an issue describing how the system that you see
this error on was set up.
EOF
fi
fi
}
setup_fstab() {
local volume_uuid="$1"
# fstab used to be responsible for mounting the volume. Now the last
# step adds a LaunchDaemon responsible for mounting. This is technically
# redundant for mounting, but diskutil appears to pick up mount options
# from fstab (and diskutil's support for specifying them directly is not
# consistent across versions/subcommands).
if ! test_fstab; then
task "Configuring /etc/fstab to specify volume mount options" >&2
add_nix_vol_fstab_line "$volume_uuid" /usr/sbin/vifs
fi
}
encrypt_volume() {
local volume_uuid="$1"
local volume_label="$2"
local password
# Note: mount/unmount are late additions to support the right order
# of operations for creating the volume and then baking its uuid into
# other artifacts; not as well-trod wrt potential errors, race
# conditions, etc.
/usr/sbin/diskutil mount "$volume_label"
password="$(/usr/bin/xxd -l 32 -p -c 256 /dev/random)"
_sudo "to add your Nix volume's password to Keychain" \
/usr/bin/security -i <<EOF
add-generic-password -a "$volume_label" -s "$volume_uuid" -l "$volume_label encryption password" -D "Encrypted volume password" -j "Added automatically by the Nix installer for use by $NIX_VOLUME_MOUNTD_DEST" -w "$password" -T /System/Library/CoreServices/APFSUserAgent -T /System/Library/CoreServices/CSUserAgent -T /usr/bin/security "/Library/Keychains/System.keychain"
EOF
builtin printf "%s" "$password" | _sudo "to encrypt your Nix volume" \
/usr/sbin/diskutil apfs encryptVolume "$volume_label" -user disk -stdinpassphrase
/usr/sbin/diskutil unmount force "$volume_label"
}
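A note on the passphrase generation above: xxd reads 32 bytes from /dev/random and prints them as a single 64-character hex line, e.g.:

    password="$(/usr/bin/xxd -l 32 -p -c 256 /dev/random)"
    echo "${#password}"   # 64 (hex characters, i.e. 32 random bytes)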
create_volume() {
# Notes:
# 1) using `-nomount` instead of `-mountpoint "$NIX_ROOT"` to get
# its UUID and set mount opts in fstab before first mount
#
# 2) system is in some sense less secure than user keychain... (it's
# possible to read the password for decrypting the keychain) but
# the user keychain appears to be available too late. As far as I
# can tell, the file with this password (/var/db/SystemKey) is
# inside the FileVault envelope. If that isn't true, it may make
# sense to store the password inside the envelope?
#
# 3) At some point it would be ideal to have a small binary to serve
# as the daemon itself, and for it to replace /usr/bin/security here.
#
# 4) *UserAgent exemptions should let the system seamlessly supply the
# password if noauto is removed from fstab entry. This is intentional;
# the user will hopefully look for help if the volume stops mounting,
# rather than failing over into subtle race-condition problems.
#
# 5) If we ever get users griping about not having space to do
# anything useful with Nix, it is possible to specify
# `-reserve 10g` or something, which will fail w/o that much
#
# 6) getting special w/ awk may be fragile, but doing it to:
# - save time over running slow diskutil commands
# - skirt risk we grab wrong volume if multiple match
_sudo "to create a new APFS volume '$NIX_VOLUME_LABEL' on $NIX_VOLUME_USE_DISK" \
/usr/sbin/diskutil apfs addVolume "$NIX_VOLUME_USE_DISK" "$NIX_VOLUME_FS" "$NIX_VOLUME_LABEL" -nomount | /usr/bin/awk '/Created new APFS Volume/ {print $5}'
}
volume_uuid_from_special() {
local volume_special="$1" # (i.e., disk1s7)
# For reasons I won't pretend to fathom, this returns 253 when it works
/System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -k "$volume_special" || true
}
# this sometimes clears immediately, and AFAIK clears
# within about 1s. diskutil info on an unmounted path
# fails in around 50-100ms and a match takes about
# 250-300ms. I suspect it's usually ~250-750ms
await_volume() {
# caution: this could, in theory, get stuck
until /usr/sbin/diskutil info "$NIX_ROOT" &>/dev/null; do
:
done
}
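If the "could get stuck" caution ever bites, a bounded variant might look like the sketch below (not what the installer ships; the 60-try/0.25s numbers are arbitrary):

    await_volume_bounded() {
        local tries=0
        until /usr/sbin/diskutil info "$NIX_ROOT" &>/dev/null; do
            tries=$((tries + 1))
            [ "$tries" -lt 60 ] || return 1
            sleep 0.25
        done
    }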
setup_volume() {
local use_special use_uuid profile_packages
task "Creating a Nix volume" >&2
use_special="${NIX_VOLUME_USE_SPECIAL:-$(create_volume)}"
use_uuid=${NIX_VOLUME_USE_UUID:-$(volume_uuid_from_special "$use_special")}
setup_fstab "$use_uuid"
if should_encrypt_volume; then
encrypt_volume "$use_uuid" "$NIX_VOLUME_LABEL"
setup_volume_daemon "encrypted" "$use_uuid"
# TODO: might be able to save ~60ms by caching or setting
# this somewhere rather than re-checking here.
elif volume_encrypted "$use_special"; then
setup_volume_daemon "encrypted" "$use_uuid"
else
setup_volume_daemon "unencrypted" "$use_uuid"
fi
await_volume
if [ "$(/usr/sbin/diskutil info -plist "$NIX_ROOT" | xmllint --xpath "(/plist/dict/key[text()='GlobalPermissionsEnabled'])/following-sibling::*[1]" -)" = "<false/>" ]; then
_sudo "to set enableOwnership (enabling users to own files)" \
/usr/sbin/diskutil enableOwnership "$NIX_ROOT"
fi
# TODO: below is a vague kludge for now; I just don't know
# what if any safe action there is to take here. Also, the
# reminder isn't very helpful.
# I'm less sure where this belongs, but it also wants mounted, pre-install
if type -p nix-env; then
profile_packages="$(nix-env --query --installed)"
# TODO: can probably do below faster w/ read
# intentionally unquoted string to eat whitespace in wc output
# shellcheck disable=SC2046,SC2059
if ! [ $(printf "$profile_packages" | /usr/bin/wc -l) = "0" ]; then
reminder <<EOF
Nix now supports only multi-user installs on Darwin/macOS, and your user's
Nix profile has some packages in it. These packages may obscure those in the
default profile, including the Nix this installer will add. You should
review these packages:
$profile_packages
EOF
fi
fi
}
setup_volume_daemon() {
local cmd_type="$1" # encrypted|unencrypted
local volume_uuid="$2"
if ! test_voldaemon; then
task "Configuring LaunchDaemon to mount '$NIX_VOLUME_LABEL'" >&2
_sudo "to install the Nix volume mounter" /usr/bin/ex "$NIX_VOLUME_MOUNTD_DEST" <<EOF
:a
$(generate_mount_daemon "$cmd_type" "$volume_uuid")
.
:x
EOF
# TODO: should probably alert the user if this is disabled?
_sudo "to launch the Nix volume mounter" \
launchctl bootstrap system "$NIX_VOLUME_MOUNTD_DEST" || true
# TODO: confirm whether kickstart is necessary?
# I feel a little superstitious, but it can guard
# against multiple problems (doesn't start, old
# version still running for some reason...)
_sudo "to launch the Nix volume mounter" \
launchctl kickstart -k system/org.nixos.darwin-store
fi
}
setup_darwin_volume() {
setup_synthetic_conf
setup_volume
}
if [ "$_CREATE_VOLUME_NO_MAIN" = 1 ]; then
if [ -n "$*" ]; then
"$@" # expose functions in case we want multiple routines?
fi
else
# no reason to pay for bash to process this
main() {
{
echo ""
echo " ------------------------------------------------------------------ "
echo " | This installer will create a volume for the nix store and |"
echo " | configure it to mount at $NIX_ROOT. Follow these steps to uninstall. |"
echo " ------------------------------------------------------------------ "
echo ""
echo " 1. Remove the entry from fstab using 'sudo /usr/sbin/vifs'"
echo " 2. Run 'sudo launchctl bootout system/org.nixos.darwin-store'"
echo " 3. Remove $NIX_VOLUME_MOUNTD_DEST"
echo " 4. Destroy the data volume using '/usr/sbin/diskutil apfs deleteVolume'"
echo " 5. Remove the 'nix' line from /etc/synthetic.conf (or the file)"
echo ""
} >&2
setup_darwin_volume
}
main "$@"
fi
@ -3,59 +3,110 @@
set -eu set -eu
set -o pipefail set -o pipefail
readonly PLIST_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist readonly NIX_DAEMON_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist
# create by default; set 0 to DIY, use a symlink, etc.
readonly NIX_VOLUME_CREATE=${NIX_VOLUME_CREATE:-1} # now default
NIX_FIRST_BUILD_UID="301" NIX_FIRST_BUILD_UID="301"
NIX_BUILD_USER_NAME_TEMPLATE="_nixbld%d" NIX_BUILD_USER_NAME_TEMPLATE="_nixbld%d"
dsclattr() { # caution: may update times on / if not run as normal non-root user
/usr/bin/dscl . -read "$1" \ read_only_root() {
| awk "/$2/ { print \$2 }" # this touch command ~should~ always produce an error
# as of this change I confirmed /usr/bin/touch emits:
# "touch: /: Operation not permitted" Monterey
# "touch: /: Read-only file system" Catalina+ and Big Sur
# "touch: /: Permission denied" Mojave
# (not matching prefix for compat w/ coreutils touch in case using
# an explicit path causes problems; its prefix differs)
case "$(/usr/bin/touch / 2>&1)" in
*"Read-only file system") # Catalina, Big Sur
return 0
;;
*"Operation not permitted") # Monterey
return 0
;;
*)
return 1
;;
esac
# Avoiding the slow semantic way to get this information (~330ms vs ~8ms)
# unless using touch causes problems. Just in case, that approach is:
# diskutil info -plist / | <find the Writable or WritableVolume keys>, i.e.
# diskutil info -plist / | xmllint --xpath "name(/plist/dict/key[text()='Writable']/following-sibling::*[1])" -
} }
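The slower "semantic" check mentioned in the comment could be wrapped like this (a sketch based on that comment, not what the installer uses):

    read_only_root_semantic() {
        # name(...) prints the element name following the 'Writable' key: "true" or "false"
        [ "$(/usr/sbin/diskutil info -plist / \
            | xmllint --xpath "name(/plist/dict/key[text()='Writable']/following-sibling::*[1])" -)" = "false" ]
    }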
poly_validate_assumptions() { if read_only_root && [ "$NIX_VOLUME_CREATE" = 1 ]; then
if [ "$(uname -s)" != "Darwin" ]; then should_create_volume() { return 0; }
failure "This script is for use with macOS!" else
should_create_volume() { return 1; }
fi
# shellcheck source=./create-darwin-volume.sh
. "$EXTRACTED_NIX_PATH/create-darwin-volume.sh" "no-main"
dsclattr() {
/usr/bin/dscl . -read "$1" \
| /usr/bin/awk "/$2/ { print \$2 }"
}
test_nix_daemon_installed() {
test -e "$NIX_DAEMON_DEST"
}
poly_cure_artifacts() {
if should_create_volume; then
task "Fixing any leftover Nix volume state"
cat <<EOF
Before I try to install, I'll check for any existing Nix volume config
and ask for your permission to remove it (so that the installer can
start fresh). I'll also ask for permission to fix any issues I spot.
EOF
cure_volumes
remove_volume_artifacts
fi fi
} }
poly_service_installed_check() { poly_service_installed_check() {
[ -e "$PLIST_DEST" ] if should_create_volume; then
test_nix_daemon_installed || test_nix_volume_mountd_installed
else
test_nix_daemon_installed
fi
} }
poly_service_uninstall_directions() { poly_service_uninstall_directions() {
cat <<EOF echo "$1. Remove macOS-specific components:"
$1. Delete $PLIST_DEST if should_create_volume && test_nix_volume_mountd_installed; then
darwin_volume_uninstall_directions
sudo launchctl unload $PLIST_DEST fi
sudo rm $PLIST_DEST if test_nix_daemon_installed; then
nix_daemon_uninstall_directions
EOF fi
} }
poly_service_setup_note() { poly_service_setup_note() {
cat <<EOF if should_create_volume; then
- load and start a LaunchDaemon (at $PLIST_DEST) for nix-daemon echo " - create a Nix volume and a LaunchDaemon to mount it"
fi
EOF echo " - create a LaunchDaemon (at $NIX_DAEMON_DEST) for nix-daemon"
echo ""
} }
poly_extra_try_me_commands(){ poly_extra_try_me_commands() {
: :
}
poly_extra_setup_instructions(){
:
} }
poly_configure_nix_daemon_service() { poly_configure_nix_daemon_service() {
task "Setting up the nix-daemon LaunchDaemon"
_sudo "to set up the nix-daemon as a LaunchDaemon" \ _sudo "to set up the nix-daemon as a LaunchDaemon" \
cp -f "/nix/var/nix/profiles/default$PLIST_DEST" "$PLIST_DEST" /bin/cp -f "/nix/var/nix/profiles/default$NIX_DAEMON_DEST" "$NIX_DAEMON_DEST"
_sudo "to load the LaunchDaemon plist for nix-daemon" \ _sudo "to load the LaunchDaemon plist for nix-daemon" \
launchctl load /Library/LaunchDaemons/org.nixos.nix-daemon.plist launchctl load /Library/LaunchDaemons/org.nixos.nix-daemon.plist
_sudo "to start the nix-daemon" \ _sudo "to start the nix-daemon" \
launchctl start org.nixos.nix-daemon launchctl kickstart -k system/org.nixos.nix-daemon
} }
poly_group_exists() { poly_group_exists() {
@ -96,6 +147,8 @@ poly_user_home_get() {
} }
poly_user_home_set() { poly_user_home_set() {
# This can trigger a permission prompt now:
# "Terminal" would like to administer your computer. Administration can include modifying passwords, networking, and system settings.
_sudo "in order to give $1 a safe home directory" \ _sudo "in order to give $1 a safe home directory" \
/usr/bin/dscl . -create "/Users/$1" "NFSHomeDirectory" "$2" /usr/bin/dscl . -create "/Users/$1" "NFSHomeDirectory" "$2"
} }
@ -121,7 +174,7 @@ poly_user_shell_set() {
poly_user_in_group_check() { poly_user_in_group_check() {
username=$1 username=$1
group=$2 group=$2
dseditgroup -o checkmember -m "$username" "$group" > /dev/null 2>&1 /usr/sbin/dseditgroup -o checkmember -m "$username" "$group" > /dev/null 2>&1
} }
poly_user_in_group_set() { poly_user_in_group_set() {
@ -151,3 +204,21 @@ poly_create_build_user() {
/usr/bin/dscl . create "/Users/$username" \ /usr/bin/dscl . create "/Users/$username" \
UniqueID "${uid}" UniqueID "${uid}"
} }
poly_prepare_to_install() {
if should_create_volume; then
header "Preparing a Nix volume"
# intentional indent below to match task indent
cat <<EOF
Nix traditionally stores its data in the root directory $NIX_ROOT, but
macOS now (starting in 10.15 Catalina) has a read-only root directory.
To support Nix, I will create a volume and configure macOS to mount it
at $NIX_ROOT.
EOF
setup_darwin_volume
fi
if [ "$(diskutil info -plist /nix | xmllint --xpath "(/plist/dict/key[text()='GlobalPermissionsEnabled'])/following-sibling::*[1]" -)" = "<false/>" ]; then
failure "This script needs a /nix volume with global permissions! This may require running sudo diskutil enableOwnership /nix."
fi
}
@ -33,7 +33,7 @@ NIX_BUILD_USER_NAME_TEMPLATE="nixbld%d"
readonly NIX_ROOT="/nix" readonly NIX_ROOT="/nix"
readonly NIX_EXTRA_CONF=${NIX_EXTRA_CONF:-} readonly NIX_EXTRA_CONF=${NIX_EXTRA_CONF:-}
readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshenv" "/etc/bash.bashrc" "/etc/zsh/zshenv") readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshrc" "/etc/bash.bashrc" "/etc/zsh/zshrc")
readonly PROFILE_BACKUP_SUFFIX=".backup-before-nix" readonly PROFILE_BACKUP_SUFFIX=".backup-before-nix"
readonly PROFILE_NIX_FILE="$NIX_ROOT/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" readonly PROFILE_NIX_FILE="$NIX_ROOT/var/nix/profiles/default/etc/profile.d/nix-daemon.sh"
@ -43,7 +43,7 @@ readonly NIX_INSTALLED_CACERT="@cacert@"
#readonly NIX_INSTALLED_CACERT="/nix/store/7dxhzymvy330i28ii676fl1pqwcahv2f-nss-cacert-3.49.2" #readonly NIX_INSTALLED_CACERT="/nix/store/7dxhzymvy330i28ii676fl1pqwcahv2f-nss-cacert-3.49.2"
readonly EXTRACTED_NIX_PATH="$(dirname "$0")" readonly EXTRACTED_NIX_PATH="$(dirname "$0")"
readonly ROOT_HOME=$(echo ~root) readonly ROOT_HOME=~root
if [ -t 0 ]; then if [ -t 0 ]; then
readonly IS_HEADLESS='no' readonly IS_HEADLESS='no'
@ -59,14 +59,19 @@ headless() {
fi fi
} }
contactme() { contact_us() {
echo "You can open an issue at https://github.com/nixos/nix/issues"
echo ""
echo "Or feel free to contact the team:"
echo " - Matrix: #nix:nixos.org"
echo " - IRC: in #nixos on irc.libera.chat"
echo " - twitter: @nixos_org"
echo " - forum: https://discourse.nixos.org"
}
get_help() {
echo "We'd love to help if you need it." echo "We'd love to help if you need it."
echo "" echo ""
echo "If you can, open an issue at https://github.com/nixos/nix/issues" contact_us
echo ""
echo "Or feel free to contact the team,"
echo " - on IRC #nixos on irc.freenode.net"
echo " - on twitter @nixos_org"
} }
uninstall_directions() { uninstall_directions() {
@ -102,7 +107,6 @@ $step. Delete the files Nix added to your system:
and that is it. and that is it.
EOF EOF
} }
nix_user_for_core() { nix_user_for_core() {
@ -170,7 +174,7 @@ failure() {
header "oh no!" header "oh no!"
_textout "$RED" "$@" _textout "$RED" "$@"
echo "" echo ""
_textout "$RED" "$(contactme)" _textout "$RED" "$(get_help)"
trap finish_cleanup EXIT trap finish_cleanup EXIT
exit 1 exit 1
} }
@ -201,6 +205,95 @@ ui_confirm() {
return 1 return 1
} }
printf -v _UNCHANGED_GRP_FMT "%b" $'\033[2m%='"$ESC" # "dim"
# bold+invert+red and bold+invert+green just for the +/- below
# red/green foreground for rest of the line
printf -v _OLD_LINE_FMT "%b" $'\033[1;7;31m-'"$ESC ${RED}%L${ESC}"
printf -v _NEW_LINE_FMT "%b" $'\033[1;7;32m+'"$ESC ${GREEN}%L${ESC}"
_diff() {
# simple colorized diff compatible w/ pre `--color` versions
diff --unchanged-group-format="$_UNCHANGED_GRP_FMT" --old-line-format="$_OLD_LINE_FMT" --new-line-format="$_NEW_LINE_FMT" --unchanged-line-format=" %L" "$@"
}
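Usage elsewhere in this change, for reference:

    _diff /etc/fstab "$SCRATCH/fstab.edit"   # removed lines in red with '-', added lines in green with '+'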
confirm_rm() {
local path="$1"
if ui_confirm "Can I remove $path?"; then
_sudo "to remove $path" rm "$path"
fi
}
confirm_edit() {
local path="$1"
local edit_path="$2"
cat <<EOF
Nix isn't the only thing in $path,
but I think I know how to edit it out.
Here's the diff:
EOF
# could technically test the diff, but caller should do it
_diff "$path" "$edit_path"
if ui_confirm "Does the change above look right?"; then
_sudo "remove nix from $path" cp "$edit_path" "$path"
fi
}
_SERIOUS_BUSINESS="${RED}%s:${ESC} "
password_confirm() {
local do_something_consequential="$1"
if ui_confirm "Can I $do_something_consequential?"; then
# shellcheck disable=SC2059
sudo -kv --prompt="$(printf "${_SERIOUS_BUSINESS}" "Enter your password to $do_something_consequential")"
else
return 1
fi
}
# Support accumulating reminders over the course of a run and showing
# them at the end. An example where this helps: the installer changes
# something, but it won't work without a reboot. If you tell the user
# when you do it, they may miss it in the stream. The value of the
# setting isn't enough to decide whether to message because you only
# need to message if you *changed* it.
# reminders stored in array delimited by empty entry; if ! headless,
# user is asked to confirm after each delimiter.
_reminders=()
((_remind_num=1))
remind() {
# (( arithmetic expression ))
if (( _remind_num > 1 )); then
header "Reminders"
for line in "${_reminders[@]}"; do
echo "$line"
if ! headless && [ "${#line}" = 0 ]; then
if read -r -p "Press enter/return to acknowledge."; then
printf $'\033[A\33[2K\r'
fi
fi
done
fi
}
reminder() {
printf -v label "${BLUE}[ %d ]${ESC}" "$_remind_num"
_reminders+=("$label")
if [[ "$*" = "" ]]; then
while read -r line; do
_reminders+=("$line")
done
else
# this expands each arg to an array entry (and each entry will
# ultimately be a separate line in the output)
_reminders+=("$@")
fi
_reminders+=("")
((_remind_num++))
}
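Typical use of this pair, pulled from elsewhere in this change: queue messages as you go, then flush them once at the end of the run:

    reminder "Nix won't work in active shell sessions until you restart them."
    reminder "FileVault is on, but your Nix Store volume isn't encrypted."
    # ... later, at the end of the run:
    remind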
__sudo() { __sudo() {
local expl="$1" local expl="$1"
local cmd="$2" local cmd="$2"
@ -221,18 +314,18 @@ _sudo() {
local expl="$1" local expl="$1"
shift shift
if ! headless; then if ! headless; then
__sudo "$expl" "$*" __sudo "$expl" "$*" >&2
fi fi
sudo "$@" sudo "$@"
} }
readonly SCRATCH=$(mktemp -d -t tmp.XXXXXXXXXX) readonly SCRATCH=$(mktemp -d "${TMPDIR:-/tmp/}tmp.XXXXXXXXXX")
function finish_cleanup { finish_cleanup() {
rm -rf "$SCRATCH" rm -rf "$SCRATCH"
} }
function finish_fail { finish_fail() {
finish_cleanup finish_cleanup
failure <<EOF failure <<EOF
@ -244,45 +337,46 @@ EOF
} }
trap finish_fail EXIT trap finish_fail EXIT
channel_update_failed=0 finish_success() {
function finish_success {
finish_cleanup
ok "Alright! We're done!" ok "Alright! We're done!"
if [ "x$channel_update_failed" = x1 ]; then
echo ""
echo "But fetching the nixpkgs channel failed. (Are you offline?)"
echo "To try again later, run \"sudo -i nix-channel --update nixpkgs\"."
fi
cat <<EOF cat <<EOF
Before Nix will work in your existing shells, you'll need to close
them and open them again. Other than that, you should be ready to go.
Try it! Open a new terminal, and type: Try it! Open a new terminal, and type:
$(poly_extra_try_me_commands) $(poly_extra_try_me_commands)
$ nix-shell -p nix-info --run "nix-info -m" $ nix-shell -p nix-info --run "nix-info -m"
$(poly_extra_setup_instructions)
Thank you for using this installer. If you have any feedback, don't
hesitate:
$(contactme) Thank you for using this installer. If you have any feedback or need
help, don't hesitate:
$(contact_us)
EOF EOF
remind
finish_cleanup
} }
finish_uninstall_success() {
ok "Alright! Nix should be removed!"
cat <<EOF
If you spot anything this uninstaller missed or have feedback,
don't hesitate:
$(contact_us)
EOF
remind
finish_cleanup
}
remove_nix_artifacts() {
failure "Not implemented yet"
}
cure_artifacts() {
poly_cure_artifacts
# remove_nix_artifacts (LATER)
}
validate_starting_assumptions() { validate_starting_assumptions() {
poly_validate_assumptions
if [ $EUID -eq 0 ]; then
failure <<EOF
Please do not run this script with root privileges. We will call sudo
when we need to.
EOF
fi
if type nix-env 2> /dev/null >&2; then if type nix-env 2> /dev/null >&2; then
warning <<EOF warning <<EOF
Nix already appears to be installed. This installer may run into issues. Nix already appears to be installed. This installer may run into issues.
@ -444,18 +538,46 @@ create_build_users() {
create_directories() { create_directories() {
# FIXME: remove all of this because it duplicates LocalStore::LocalStore(). # FIXME: remove all of this because it duplicates LocalStore::LocalStore().
task "Setting up the basic directory structure"
if [ -d "$NIX_ROOT" ]; then
# if /nix already exists, take ownership
#
# Caution: notes below are macOS-y
# This is a bit of a goldilocks zone for taking ownership
# if there are already files on the volume; the volume is
# now mounted, but we haven't added a bunch of new files
# this is probably a bit slow; I've been seeing 3.3-4s even
# when promptly installed over a fresh single-user install.
# In case anyone's aware of a shortcut.
# `|| true`: .Trashes errors w/o full disk perm
# rumor per #4488 that macOS 11.2 may not have
# sbin on path, and that's where chown is, but
# since this bit is cross-platform:
# - first try with `command -vp` to try and find
# chown in the usual places
# - fall back on `command -v` which would find
# any chown on path
# if we don't find one, the command is already
# hiding behind || true, and the general state
# should be one the user can repair once they
# figure out where chown is...
local get_chr_own="$(command -vp chown)"
if [[ -z "$get_chr_own" ]]; then
get_chr_own="$(command -v chown)"
fi
_sudo "to take root ownership of existing Nix store files" \
"$get_chr_own" -R "root:$NIX_BUILD_GROUP_NAME" "$NIX_ROOT" || true
fi
_sudo "to make the basic directory structure of Nix (part 1)" \ _sudo "to make the basic directory structure of Nix (part 1)" \
mkdir -pv -m 0755 /nix /nix/var /nix/var/log /nix/var/log/nix /nix/var/log/nix/drvs /nix/var/nix{,/db,/gcroots,/profiles,/temproots,/userpool} /nix/var/nix/{gcroots,profiles}/per-user install -dv -m 0755 /nix /nix/var /nix/var/log /nix/var/log/nix /nix/var/log/nix/drvs /nix/var/nix{,/db,/gcroots,/profiles,/temproots,/userpool} /nix/var/nix/{gcroots,profiles}/per-user
_sudo "to make the basic directory structure of Nix (part 2)" \ _sudo "to make the basic directory structure of Nix (part 2)" \
mkdir -pv -m 1775 /nix/store install -dv -g "$NIX_BUILD_GROUP_NAME" -m 1775 /nix/store
_sudo "to make the basic directory structure of Nix (part 3)" \
chgrp "$NIX_BUILD_GROUP_NAME" /nix/store
_sudo "to place the default nix daemon configuration (part 1)" \ _sudo "to place the default nix daemon configuration (part 1)" \
mkdir -pv -m 0555 /etc/nix install -dv -m 0555 /etc/nix
} }
place_channel_configuration() { place_channel_configuration() {
@ -475,7 +597,7 @@ This installation tool will set up your computer with the Nix package
manager. This will happen in a few stages: manager. This will happen in a few stages:
1. Make sure your computer doesn't already have Nix. If it does, I 1. Make sure your computer doesn't already have Nix. If it does, I
will show you instructions on how to clean up your old one. will show you instructions on how to clean up your old install.
2. Show you what we are going to install and where. Then we will ask 2. Show you what we are going to install and where. Then we will ask
if you are ready to continue. if you are ready to continue.
@ -574,11 +696,15 @@ EOF
} }
install_from_extracted_nix() { install_from_extracted_nix() {
task "Installing Nix"
( (
cd "$EXTRACTED_NIX_PATH" cd "$EXTRACTED_NIX_PATH"
_sudo "to copy the basic Nix files to the new store at $NIX_ROOT/store" \ _sudo "to copy the basic Nix files to the new store at $NIX_ROOT/store" \
rsync -rlpt --chmod=-w ./store/* "$NIX_ROOT/store/" cp -RLp ./store/* "$NIX_ROOT/store/"
_sudo "to make the new store non-writable at $NIX_ROOT/store" \
chmod -R ugo-w "$NIX_ROOT/store/"
if [ -d "$NIX_INSTALLED_NIX" ]; then if [ -d "$NIX_INSTALLED_NIX" ]; then
echo " Alright! We have our first nix at $NIX_INSTALLED_NIX" echo " Alright! We have our first nix at $NIX_INSTALLED_NIX"
@ -589,9 +715,8 @@ $NIX_INSTALLED_NIX.
EOF EOF
fi fi
cat ./.reginfo \ _sudo "to load data for the first time in to the Nix Database" \
| _sudo "to load data for the first time in to the Nix Database" \ "$NIX_INSTALLED_NIX/bin/nix-store" --load-db < ./.reginfo
"$NIX_INSTALLED_NIX/bin/nix-store" --load-db
echo " Just finished getting the nix database ready." echo " Just finished getting the nix database ready."
) )
@ -610,6 +735,7 @@ EOF
} }
configure_shell_profile() { configure_shell_profile() {
task "Setting up shell profiles: ${PROFILE_TARGETS[*]}"
for profile_target in "${PROFILE_TARGETS[@]}"; do for profile_target in "${PROFILE_TARGETS[@]}"; do
if [ -e "$profile_target" ]; then if [ -e "$profile_target" ]; then
_sudo "to back up your current $profile_target to $profile_target$PROFILE_BACKUP_SUFFIX" \ _sudo "to back up your current $profile_target to $profile_target$PROFILE_BACKUP_SUFFIX" \
@ -629,14 +755,27 @@ configure_shell_profile() {
tee -a "$profile_target" tee -a "$profile_target"
fi fi
done done
# TODO: should we suggest '. $PROFILE_NIX_FILE'? It would get them on
# their way less disruptively, but a counter-argument is that they won't
# immediately notice if something didn't get set up right?
reminder "Nix won't work in active shell sessions until you restart them."
}
cert_in_store() {
# in a subshell
# - change into the cert-file dir
# - get the physical pwd
# and test if this path is in the Nix store
[[ "$(cd -- "$(dirname "$NIX_SSL_CERT_FILE")" && exec pwd -P)" == "$NIX_ROOT/store/"* ]]
} }
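The subshell resolves symlinks in the certificate's directory before the prefix test; for example (the path is illustrative, and on macOS /etc is a symlink to /private/etc):

    NIX_SSL_CERT_FILE=/etc/ssl/cert.pem
    ( cd -- "$(dirname "$NIX_SSL_CERT_FILE")" && exec pwd -P )
    # prints /private/etc/ssl, which does not match "$NIX_ROOT/store/"*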
setup_default_profile() { setup_default_profile() {
_sudo "to installing a bootstrapping Nix in to the default Profile" \ task "Setting up the default profile"
_sudo "to install a bootstrapping Nix in to the default profile" \
HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_NIX" HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_NIX"
if [ -z "${NIX_SSL_CERT_FILE:-}" ] || ! [ -f "${NIX_SSL_CERT_FILE:-}" ]; then if [ -z "${NIX_SSL_CERT_FILE:-}" ] || ! [ -f "${NIX_SSL_CERT_FILE:-}" ] || cert_in_store; then
_sudo "to installing a bootstrapping SSL certificate just for Nix in to the default Profile" \ _sudo "to install a bootstrapping SSL certificate just for Nix in to the default profile" \
HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_CACERT" HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_CACERT"
export NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt export NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt
fi fi
@ -645,9 +784,13 @@ setup_default_profile() {
# Have to explicitly pass NIX_SSL_CERT_FILE as part of the sudo call, # Have to explicitly pass NIX_SSL_CERT_FILE as part of the sudo call,
# otherwise it will be lost in environments where sudo doesn't pass # otherwise it will be lost in environments where sudo doesn't pass
# all the environment variables by default. # all the environment variables by default.
_sudo "to update the default channel in the default profile" \ if ! _sudo "to update the default channel in the default profile" \
HOME="$ROOT_HOME" NIX_SSL_CERT_FILE="$NIX_SSL_CERT_FILE" "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs \ HOME="$ROOT_HOME" NIX_SSL_CERT_FILE="$NIX_SSL_CERT_FILE" "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs; then
|| channel_update_failed=1 reminder <<EOF
I had trouble fetching the nixpkgs channel (are you offline?)
To try again later, run: sudo -i nix-channel --update nixpkgs
EOF
fi
fi fi
} }
@ -662,6 +805,17 @@ EOF
} }
main() { main() {
# TODO: I've moved this out of validate_starting_assumptions so we
# can fail faster in this case. Sourcing install-darwin... now runs
# `touch /` to detect Read-only root, but it could update times on
# pre-Catalina macOS if run as root user.
if [ $EUID -eq 0 ]; then
failure <<EOF
Please do not run this script with root privileges. We will call sudo
when we need to.
EOF
fi
if [ "$(uname -s)" = "Darwin" ]; then if [ "$(uname -s)" = "Darwin" ]; then
# shellcheck source=./install-darwin-multi-user.sh # shellcheck source=./install-darwin-multi-user.sh
. "$EXTRACTED_NIX_PATH/install-darwin-multi-user.sh" . "$EXTRACTED_NIX_PATH/install-darwin-multi-user.sh"
@ -675,17 +829,24 @@ main() {
welcome_to_nix welcome_to_nix
chat_about_sudo chat_about_sudo
cure_artifacts
# TODO: there's a tension between cure and validate. I moved the
# the sudo/root check out of validate to the head of this func.
# Cure is *intended* to subsume the validate-and-abort approach,
# so it may eventually obsolete it.
validate_starting_assumptions validate_starting_assumptions
setup_report setup_report
if ! ui_confirm "Ready to continue?"; then if ! ui_confirm "Ready to continue?"; then
ok "Alright, no changes have been made :)" ok "Alright, no changes have been made :)"
contactme get_help
trap finish_cleanup EXIT trap finish_cleanup EXIT
exit 1 exit 1
fi fi
poly_prepare_to_install
create_build_group create_build_group
create_build_users create_build_users
create_directories create_directories
@ -695,6 +856,7 @@ main() {
configure_shell_profile configure_shell_profile
set +eu set +eu
# shellcheck disable=SC1091
. /etc/profile . /etc/profile
set -eu set -eu
@ -706,5 +868,20 @@ main() {
trap finish_success EXIT trap finish_success EXIT
} }
# set an empty initial arg for bare invocations in case we need to
# disambiguate someone directly invoking this later.
if [ "${#@}" = 0 ]; then
set ""
fi
main # ACTION for override
case "${1-}" in
# uninstall)
# shift
# uninstall "$@";;
# install == same as the no-arg condition for now (but, explicit)
""|install)
main;;
*) # holding space for future options (like uninstall + install?)
failure "install-multi-user: invalid argument";;
esac
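
The empty `set` plus `case "${1-}"` combination gives bare invocations an explicit (empty) action word, so the dispatcher can treat "no argument" and `install` identically while rejecting anything else. The same pattern in isolation:

# Give a bare invocation an explicit empty action word.
if [ "$#" -eq 0 ]; then
    set -- ""
fi

case "${1-}" in
    ""|install)
        echo "running the default install action" ;;
    *)
        echo "invalid argument: '$1'" >&2
        exit 1 ;;
esac
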
@ -26,18 +26,9 @@ fi
# macOS support for 10.12.6 or higher # macOS support for 10.12.6 or higher
if [ "$(uname -s)" = "Darwin" ]; then if [ "$(uname -s)" = "Darwin" ]; then
IFS='.' read macos_major macos_minor macos_patch << EOF IFS='.' read -r macos_major macos_minor macos_patch << EOF
$(sw_vers -productVersion) $(sw_vers -productVersion)
EOF EOF
# TODO: this is a temporary speed-bump to keep people from naively installing Nix
# on macOS Big Sur (11.0+, 10.16+) until nixpkgs updates are ready for them.
# *Ideally* this is gone before next Nix release. If you're intentionally working on
# Nix + Big Sur, just comment out this block and be on your way :)
if [ "$macos_major" -gt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -gt 15 ]; }; then
echo "$0: nixpkgs isn't quite ready to support macOS $(sw_vers -productVersion) yet"
exit 1
fi
if [ "$macos_major" -lt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -lt 12 ]; } || { [ "$macos_minor" -eq 12 ] && [ "$macos_patch" -lt 6 ]; }; then if [ "$macos_major" -lt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -lt 12 ]; } || { [ "$macos_minor" -eq 12 ] && [ "$macos_patch" -lt 6 ]; }; then
# patch may not be present; command substitution for simplicity # patch may not be present; command substitution for simplicity
echo "$0: macOS $(sw_vers -productVersion) is not supported, upgrade to 10.12.6 or higher" echo "$0: macOS $(sw_vers -productVersion) is not supported, upgrade to 10.12.6 or higher"
@ -46,21 +37,40 @@ EOF
fi fi
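
`IFS='.' read -r` splits the dotted version without spawning extra processes, and a missing field (10.12 has no patch component) is simply left empty. A standalone sketch of the same comparison, with the empty-patch case handled explicitly:

version="10.12"   # e.g. "$(sw_vers -productVersion)"
IFS='.' read -r major minor patch <<EOF
$version
EOF

if [ "$major" -lt 10 ] \
    || { [ "$major" -eq 10 ] && [ "$minor" -lt 12 ]; } \
    || { [ "$major" -eq 10 ] && [ "$minor" -eq 12 ] && [ "${patch:-0}" -lt 6 ]; }; then
    echo "macOS $version is not supported, upgrade to 10.12.6 or higher" >&2
fi
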
# Determine if we could use the multi-user installer or not # Determine if we could use the multi-user installer or not
if [ "$(uname -s)" = "Darwin" ]; then if [ "$(uname -s)" = "Linux" ]; then
echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2
elif [ "$(uname -s)" = "Linux" ]; then
echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2 echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2
fi fi
INSTALL_MODE=no-daemon case "$(uname -s)" in
CREATE_DARWIN_VOLUME=0 "Darwin")
INSTALL_MODE=daemon;;
*)
INSTALL_MODE=no-daemon;;
esac
# space-separated string
ACTIONS=
# handle the command line flags # handle the command line flags
while [ $# -gt 0 ]; do while [ $# -gt 0 ]; do
case $1 in case $1 in
--daemon) --daemon)
INSTALL_MODE=daemon;; INSTALL_MODE=daemon
ACTIONS="${ACTIONS}install "
;;
--no-daemon) --no-daemon)
INSTALL_MODE=no-daemon;; if [ "$(uname -s)" = "Darwin" ]; then
printf '\e[1;31mError: --no-daemon installs are no longer supported on Darwin/macOS!\e[0m\n' >&2
exit 1
fi
INSTALL_MODE=no-daemon
# intentional tail space
ACTIONS="${ACTIONS}install "
;;
# --uninstall)
# # intentional tail space
# ACTIONS="${ACTIONS}uninstall "
# ;;
--no-channel-add) --no-channel-add)
export NIX_INSTALLER_NO_CHANNEL_ADD=1;; export NIX_INSTALLER_NO_CHANNEL_ADD=1;;
--daemon-user-count) --daemon-user-count)
@ -69,13 +79,18 @@ while [ $# -gt 0 ]; do
--no-modify-profile) --no-modify-profile)
NIX_INSTALLER_NO_MODIFY_PROFILE=1;; NIX_INSTALLER_NO_MODIFY_PROFILE=1;;
--darwin-use-unencrypted-nix-store-volume) --darwin-use-unencrypted-nix-store-volume)
CREATE_DARWIN_VOLUME=1;; {
echo "Warning: the flag --darwin-use-unencrypted-nix-store-volume"
echo " is no longer needed and will be removed in the future."
echo ""
} >&2;;
--nix-extra-conf-file) --nix-extra-conf-file)
export NIX_EXTRA_CONF="$(cat $2)" # shellcheck disable=SC2155
export NIX_EXTRA_CONF="$(cat "$2")"
shift;; shift;;
*) *)
( {
echo "Nix Installer [--daemon|--no-daemon] [--daemon-user-count INT] [--no-channel-add] [--no-modify-profile] [--darwin-use-unencrypted-nix-store-volume] [--nix-extra-conf-file FILE]" echo "Nix Installer [--daemon|--no-daemon] [--daemon-user-count INT] [--no-channel-add] [--no-modify-profile] [--nix-extra-conf-file FILE]"
echo "Choose installation method." echo "Choose installation method."
echo "" echo ""
@ -91,55 +106,25 @@ while [ $# -gt 0 ]; do
echo "" echo ""
echo " --no-channel-add: Don't add any channels. nixpkgs-unstable is installed by default." echo " --no-channel-add: Don't add any channels. nixpkgs-unstable is installed by default."
echo "" echo ""
echo " --no-modify-profile: Skip channel installation. When not provided nixpkgs-unstable" echo " --no-modify-profile: Don't modify the user profile to automatically load nix."
echo " is installed by default."
echo "" echo ""
echo " --daemon-user-count: Number of build users to create. Defaults to 32." echo " --daemon-user-count: Number of build users to create. Defaults to 32."
echo "" echo ""
echo " --nix-extra-conf-file: Path to nix.conf to prepend when installing /etc/nix.conf" echo " --nix-extra-conf-file: Path to nix.conf to prepend when installing /etc/nix/nix.conf"
echo "" echo ""
if [ -n "${INVOKED_FROM_INSTALL_IN:-}" ]; then if [ -n "${INVOKED_FROM_INSTALL_IN:-}" ]; then
echo " --tarball-url-prefix URL: Base URL to download the Nix tarball from." echo " --tarball-url-prefix URL: Base URL to download the Nix tarball from."
fi fi
) >&2 } >&2
# darwin and Catalina+
if [ "$(uname -s)" = "Darwin" ] && { [ "$macos_major" -gt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -gt 14 ]; }; }; then
(
echo " --darwin-use-unencrypted-nix-store-volume: Create an APFS volume for the Nix"
echo " store and mount it at /nix. This is the recommended way to create"
echo " /nix with a read-only / on macOS >=10.15."
echo " See: https://nixos.org/nix/manual/#sect-macos-installation"
echo ""
) >&2
fi
exit;; exit;;
esac esac
shift shift
done done
if [ "$(uname -s)" = "Darwin" ]; then
if [ "$CREATE_DARWIN_VOLUME" = 1 ]; then
printf '\e[1;31mCreating volume and mountpoint /nix.\e[0m\n'
"$self/create-darwin-volume.sh"
fi
writable="$(diskutil info -plist / | xmllint --xpath "name(/plist/dict/key[text()='Writable']/following-sibling::*[1])" -)"
if ! [ -e $dest ] && [ "$writable" = "false" ]; then
(
echo ""
echo "Installing on macOS >=10.15 requires relocating the store to an apfs volume."
echo "Use sh <(curl -L https://nixos.org/nix/install) --darwin-use-unencrypted-nix-store-volume or run the preparation steps manually."
echo "See https://nixos.org/nix/manual/#sect-macos-installation"
echo ""
) >&2
exit 1
fi
fi
if [ "$INSTALL_MODE" = "daemon" ]; then if [ "$INSTALL_MODE" = "daemon" ]; then
printf '\e[1;31mSwitching to the Multi-user Installer\e[0m\n' printf '\e[1;31mSwitching to the Multi-user Installer\e[0m\n'
exec "$self/install-multi-user" exec "$self/install-multi-user" $ACTIONS # let ACTIONS split
exit 0 exit 0
fi fi
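
`$ACTIONS` is deliberately left unquoted in the `exec` above: the accumulated, space-terminated words are meant to be split back into individual arguments for install-multi-user. A sketch of that hand-off, with placeholder action names:

ACTIONS=
ACTIONS="${ACTIONS}install "    # intentional trailing space
ACTIONS="${ACTIONS}uninstall "  # hypothetical second action

# shellcheck disable=SC2086  # word splitting is the point here
set -- $ACTIONS
printf 'would exec install-multi-user with %d argument(s): %s\n' "$#" "$*"
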
@ -194,6 +179,7 @@ if ! "$nix/bin/nix-store" --load-db < "$self/.reginfo"; then
exit 1 exit 1
fi fi
# shellcheck source=./nix-profile.sh.in
. "$nix/etc/profile.d/nix.sh" . "$nix/etc/profile.d/nix.sh"
if ! "$nix/bin/nix-env" -i "$nix"; then if ! "$nix/bin/nix-env" -i "$nix"; then
@ -41,10 +41,8 @@ handle_network_proxy() {
fi fi
} }
poly_validate_assumptions() { poly_cure_artifacts() {
if [ "$(uname -s)" != "Linux" ]; then :
failure "This script is for use with Linux!"
fi
} }
poly_service_installed_check() { poly_service_installed_check() {
@ -72,7 +70,7 @@ poly_service_setup_note() {
EOF EOF
} }
poly_extra_try_me_commands(){ poly_extra_try_me_commands() {
if [ -e /run/systemd/system ]; then if [ -e /run/systemd/system ]; then
: :
else else
@ -81,19 +79,10 @@ poly_extra_try_me_commands(){
EOF EOF
fi fi
} }
poly_extra_setup_instructions(){
if [ -e /run/systemd/system ]; then
:
else
cat <<EOF
Additionally, you may want to add nix-daemon to your init-system.
EOF
fi
}
poly_configure_nix_daemon_service() { poly_configure_nix_daemon_service() {
if [ -e /run/systemd/system ]; then if [ -e /run/systemd/system ]; then
task "Setting up the nix-daemon systemd service"
_sudo "to set up the nix-daemon service" \ _sudo "to set up the nix-daemon service" \
systemctl link "/nix/var/nix/profiles/default$SERVICE_SRC" systemctl link "/nix/var/nix/profiles/default$SERVICE_SRC"
@ -110,6 +99,8 @@ poly_configure_nix_daemon_service() {
_sudo "to start the nix-daemon.service" \ _sudo "to start the nix-daemon.service" \
systemctl restart nix-daemon.service systemctl restart nix-daemon.service
else
reminder "I don't support your init system yet; you may want to add nix-daemon manually."
fi fi
} }
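
For orientation, a hedged sketch of doing the same systemd wiring by hand; the unit path is the conventional location under the default profile and is an assumption here:

# Make systemd aware of the unit that lives under the default profile,
# then (re)start the daemon and check that it came up.
sudo systemctl link /nix/var/nix/profiles/default/lib/systemd/system/nix-daemon.service
sudo systemctl restart nix-daemon.service
systemctl status nix-daemon.service --no-pager
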
@ -207,3 +198,7 @@ poly_create_build_user() {
--password "!" \ --password "!" \
"$username" "$username"
} }
poly_prepare_to_install() {
:
}
@ -40,21 +40,25 @@ case "$(uname -s).$(uname -m)" in
path=@tarballPath_aarch64-linux@ path=@tarballPath_aarch64-linux@
system=aarch64-linux system=aarch64-linux
;; ;;
Linux.armv6l_linux)
hash=@tarballHash_armv6l-linux@
path=@tarballPath_armv6l-linux@
system=armv6l-linux
;;
Linux.armv7l_linux)
hash=@tarballHash_armv7l-linux@
path=@tarballPath_armv7l-linux@
system=armv7l-linux
;;
Darwin.x86_64) Darwin.x86_64)
hash=@tarballHash_x86_64-darwin@ hash=@tarballHash_x86_64-darwin@
path=@tarballPath_x86_64-darwin@ path=@tarballPath_x86_64-darwin@
system=x86_64-darwin system=x86_64-darwin
;; ;;
Darwin.arm64|Darwin.aarch64) Darwin.arm64|Darwin.aarch64)
# check for Rosetta 2 support hash=@tarballHash_aarch64-darwin@
if ! [ -f /Library/Apple/System/Library/LaunchDaemons/com.apple.oahd.plist ]; then path=@tarballPath_aarch64-darwin@
oops "Rosetta 2 is not installed on this ARM64 macOS machine. Run softwareupdate --install-rosetta then restart installation" system=aarch64-darwin
fi
hash=@binaryTarball_x86_64-darwin@
path=@tarballPath_x86_64-darwin@
# eventually maybe: aarch64-darwin
system=x86_64-darwin
;; ;;
*) oops "sorry, there is no binary distribution of Nix for your platform";; *) oops "sorry, there is no binary distribution of Nix for your platform";;
esac esac
@ -72,14 +76,21 @@ fi
tarball=$tmpDir/nix-@nixVersion@-$system.tar.xz tarball=$tmpDir/nix-@nixVersion@-$system.tar.xz
require_util curl "download the binary tarball"
require_util tar "unpack the binary tarball" require_util tar "unpack the binary tarball"
if [ "$(uname -s)" != "Darwin" ]; then if [ "$(uname -s)" != "Darwin" ]; then
require_util xz "unpack the binary tarball" require_util xz "unpack the binary tarball"
fi fi
if command -v wget > /dev/null 2>&1; then
fetch() { wget "$1" -O "$2"; }
elif command -v curl > /dev/null 2>&1; then
fetch() { curl -L "$1" -o "$2"; }
else
oops "you don't have wget or curl installed, which I need to download the binary tarball"
fi
echo "downloading Nix @nixVersion@ binary tarball for $system from '$url' to '$tmpDir'..." echo "downloading Nix @nixVersion@ binary tarball for $system from '$url' to '$tmpDir'..."
curl -L "$url" -o "$tarball" || oops "failed to download '$url'" fetch "$url" "$tarball" || oops "failed to download '$url'"
if command -v sha256sum > /dev/null 2>&1; then if command -v sha256sum > /dev/null 2>&1; then
hash2="$(sha256sum -b "$tarball" | cut -c1-64)" hash2="$(sha256sum -b "$tarball" | cut -c1-64)"
@ -270,14 +270,23 @@ connected:
{ {
Activity act(*logger, lvlTalkative, actUnknown, fmt("copying dependencies to '%s'", storeUri)); Activity act(*logger, lvlTalkative, actUnknown, fmt("copying dependencies to '%s'", storeUri));
copyPaths(store, ref<Store>(sshStore), store->parseStorePathSet(inputs), NoRepair, NoCheckSigs, substitute); copyPaths(*store, *sshStore, store->parseStorePathSet(inputs), NoRepair, NoCheckSigs, substitute);
} }
uploadLock = -1; uploadLock = -1;
auto drv = store->readDerivation(*drvPath); auto drv = store->readDerivation(*drvPath);
auto outputHashes = staticOutputHashes(*store, drv); auto outputHashes = staticOutputHashes(*store, drv);
drv.inputSrcs = store->parseStorePathSet(inputs);
// Hijack the input paths of the derivation to include all the paths
// that come from the `inputDrvs` set.
// We don't do that for derivations whose `inputDrvs` is empty,
// because
// 1. It's not needed
// 2. Changing the `inputSrcs` set changes the associated output ids,
// which break CA derivations
if (!drv.inputDrvs.empty())
drv.inputSrcs = store->parseStorePathSet(inputs);
auto result = sshStore->buildDerivation(*drvPath, drv); auto result = sshStore->buildDerivation(*drvPath, drv);
@ -312,7 +321,7 @@ connected:
if (auto localStore = store.dynamic_pointer_cast<LocalStore>()) if (auto localStore = store.dynamic_pointer_cast<LocalStore>())
for (auto & path : missingPaths) for (auto & path : missingPaths)
localStore->locksHeld.insert(store->printStorePath(path)); /* FIXME: ugly */ localStore->locksHeld.insert(store->printStorePath(path)); /* FIXME: ugly */
copyPaths(ref<Store>(sshStore), store, missingPaths, NoRepair, NoCheckSigs, NoSubstitute); copyPaths(*sshStore, *store, missingPaths, NoRepair, NoCheckSigs, NoSubstitute);
} }
// XXX: Should be done as part of `copyPaths` // XXX: Should be done as part of `copyPaths`
for (auto & realisation : missingRealisations) { for (auto & realisation : missingRealisations) {
@ -54,7 +54,31 @@ void StoreCommand::run()
run(getStore()); run(getStore());
} }
RealisedPathsCommand::RealisedPathsCommand(bool recursive) EvalCommand::EvalCommand()
{
}
EvalCommand::~EvalCommand()
{
if (evalState)
evalState->printStats();
}
ref<Store> EvalCommand::getEvalStore()
{
if (!evalStore)
evalStore = evalStoreUrl ? openStore(*evalStoreUrl) : getStore();
return ref<Store>(evalStore);
}
ref<EvalState> EvalCommand::getEvalState()
{
if (!evalState)
evalState = std::make_shared<EvalState>(searchPath, getEvalStore(), getStore());
return ref<EvalState>(evalState);
}
BuiltPathsCommand::BuiltPathsCommand(bool recursive)
: recursive(recursive) : recursive(recursive)
{ {
if (recursive) if (recursive)
@ -81,44 +105,53 @@ RealisedPathsCommand::RealisedPathsCommand(bool recursive)
}); });
} }
void RealisedPathsCommand::run(ref<Store> store) void BuiltPathsCommand::run(ref<Store> store)
{ {
std::vector<RealisedPath> paths; BuiltPaths paths;
if (all) { if (all) {
if (installables.size()) if (installables.size())
throw UsageError("'--all' does not expect arguments"); throw UsageError("'--all' does not expect arguments");
// XXX: Only uses opaque paths, ignores all the realisations // XXX: Only uses opaque paths, ignores all the realisations
for (auto & p : store->queryAllValidPaths()) for (auto & p : store->queryAllValidPaths())
paths.push_back(p); paths.push_back(BuiltPath::Opaque{p});
} else { } else {
auto pathSet = toRealisedPaths(store, realiseMode, operateOn, installables); paths = toBuiltPaths(getEvalStore(), store, realiseMode, operateOn, installables);
if (recursive) { if (recursive) {
auto roots = std::move(pathSet); // XXX: This only computes the store path closure, ignoring
pathSet = {}; // intermediate realisations
RealisedPath::closure(*store, roots, pathSet); StorePathSet pathsRoots, pathsClosure;
for (auto & root : paths) {
auto rootFromThis = root.outPaths();
pathsRoots.insert(rootFromThis.begin(), rootFromThis.end());
}
store->computeFSClosure(pathsRoots, pathsClosure);
for (auto & path : pathsClosure)
paths.push_back(BuiltPath::Opaque{path});
} }
for (auto & path : pathSet)
paths.push_back(path);
} }
run(store, std::move(paths)); run(store, std::move(paths));
} }
StorePathsCommand::StorePathsCommand(bool recursive) StorePathsCommand::StorePathsCommand(bool recursive)
: RealisedPathsCommand(recursive) : BuiltPathsCommand(recursive)
{ {
} }
void StorePathsCommand::run(ref<Store> store, std::vector<RealisedPath> paths) void StorePathsCommand::run(ref<Store> store, BuiltPaths && paths)
{ {
StorePaths storePaths; StorePathSet storePaths;
for (auto & p : paths) for (auto & builtPath : paths)
storePaths.push_back(p.path()); for (auto & p : builtPath.outPaths())
storePaths.insert(p);
run(store, std::move(storePaths)); auto sorted = store->topoSortPaths(storePaths);
std::reverse(sorted.begin(), sorted.end());
run(store, std::move(sorted));
} }
void StorePathCommand::run(ref<Store> store, std::vector<StorePath> storePaths) void StorePathCommand::run(ref<Store> store, std::vector<StorePath> && storePaths)
{ {
if (storePaths.size() != 1) if (storePaths.size() != 1)
throw UsageError("this command requires exactly one store path"); throw UsageError("this command requires exactly one store path");
@ -162,7 +195,7 @@ void MixProfile::updateProfile(const StorePath & storePath)
profile2, storePath)); profile2, storePath));
} }
void MixProfile::updateProfile(const DerivedPathsWithHints & buildables) void MixProfile::updateProfile(const BuiltPaths & buildables)
{ {
if (!profile) return; if (!profile) return;
@ -170,22 +203,19 @@ void MixProfile::updateProfile(const DerivedPathsWithHints & buildables)
for (auto & buildable : buildables) { for (auto & buildable : buildables) {
std::visit(overloaded { std::visit(overloaded {
[&](DerivedPathWithHints::Opaque bo) { [&](BuiltPath::Opaque bo) {
result.push_back(bo.path); result.push_back(bo.path);
}, },
[&](DerivedPathWithHints::Built bfd) { [&](BuiltPath::Built bfd) {
for (auto & output : bfd.outputs) { for (auto & output : bfd.outputs) {
/* Output path should be known because we just tried to result.push_back(output.second);
build it. */
assert(output.second);
result.push_back(*output.second);
} }
}, },
}, buildable.raw()); }, buildable.raw());
} }
if (result.size() != 1) if (result.size() != 1)
throw Error("'--profile' requires that the arguments produce a single store path, but there are %d", result.size()); throw UsageError("'--profile' requires that the arguments produce a single store path, but there are %d", result.size());
updateProfile(result[0]); updateProfile(result[0]);
} }
@ -45,11 +45,18 @@ private:
struct EvalCommand : virtual StoreCommand, MixEvalArgs struct EvalCommand : virtual StoreCommand, MixEvalArgs
{ {
ref<EvalState> getEvalState(); EvalCommand();
std::shared_ptr<EvalState> evalState;
~EvalCommand(); ~EvalCommand();
ref<Store> getEvalStore();
ref<EvalState> getEvalState();
private:
std::shared_ptr<Store> evalStore;
std::shared_ptr<EvalState> evalState;
}; };
struct MixFlakeOptions : virtual Args, EvalCommand struct MixFlakeOptions : virtual Args, EvalCommand
@ -101,6 +108,8 @@ enum class Realise {
exists. */ exists. */
Derivation, Derivation,
/* Evaluate in dry-run mode. Postcondition: nothing. */ /* Evaluate in dry-run mode. Postcondition: nothing. */
// FIXME: currently unused, but could be revived if we can
// evaluate derivations in-memory.
Nothing Nothing
}; };
@ -143,7 +152,7 @@ private:
}; };
/* A command that operates on zero or more store paths. */ /* A command that operates on zero or more store paths. */
struct RealisedPathsCommand : public InstallablesCommand struct BuiltPathsCommand : public InstallablesCommand
{ {
private: private:
@ -156,26 +165,26 @@ protected:
public: public:
RealisedPathsCommand(bool recursive = false); BuiltPathsCommand(bool recursive = false);
using StoreCommand::run; using StoreCommand::run;
virtual void run(ref<Store> store, std::vector<RealisedPath> paths) = 0; virtual void run(ref<Store> store, BuiltPaths && paths) = 0;
void run(ref<Store> store) override; void run(ref<Store> store) override;
bool useDefaultInstallables() override { return !all; } bool useDefaultInstallables() override { return !all; }
}; };
struct StorePathsCommand : public RealisedPathsCommand struct StorePathsCommand : public BuiltPathsCommand
{ {
StorePathsCommand(bool recursive = false); StorePathsCommand(bool recursive = false);
using RealisedPathsCommand::run; using BuiltPathsCommand::run;
virtual void run(ref<Store> store, std::vector<StorePath> storePaths) = 0; virtual void run(ref<Store> store, std::vector<StorePath> && storePaths) = 0;
void run(ref<Store> store, std::vector<RealisedPath> paths) override; void run(ref<Store> store, BuiltPaths && paths) override;
}; };
/* A command that operates on exactly one store path. */ /* A command that operates on exactly one store path. */
@ -185,7 +194,7 @@ struct StorePathCommand : public StorePathsCommand
virtual void run(ref<Store> store, const StorePath & storePath) = 0; virtual void run(ref<Store> store, const StorePath & storePath) = 0;
void run(ref<Store> store, std::vector<StorePath> storePaths) override; void run(ref<Store> store, std::vector<StorePath> && storePaths) override;
}; };
/* A helper class for registering commands globally. */ /* A helper class for registering commands globally. */
@ -216,26 +225,37 @@ static RegisterCommand registerCommand2(std::vector<std::string> && name)
return RegisterCommand(std::move(name), [](){ return make_ref<T>(); }); return RegisterCommand(std::move(name), [](){ return make_ref<T>(); });
} }
DerivedPathsWithHints build(ref<Store> store, Realise mode, BuiltPaths build(
std::vector<std::shared_ptr<Installable>> installables, BuildMode bMode = bmNormal); ref<Store> evalStore,
ref<Store> store, Realise mode,
const std::vector<std::shared_ptr<Installable>> & installables,
BuildMode bMode = bmNormal);
std::set<StorePath> toStorePaths(ref<Store> store, std::set<StorePath> toStorePaths(
Realise mode, OperateOn operateOn, ref<Store> evalStore,
std::vector<std::shared_ptr<Installable>> installables);
StorePath toStorePath(ref<Store> store,
Realise mode, OperateOn operateOn,
std::shared_ptr<Installable> installable);
std::set<StorePath> toDerivations(ref<Store> store,
std::vector<std::shared_ptr<Installable>> installables,
bool useDeriver = false);
std::set<RealisedPath> toRealisedPaths(
ref<Store> store, ref<Store> store,
Realise mode, Realise mode,
OperateOn operateOn, OperateOn operateOn,
std::vector<std::shared_ptr<Installable>> installables); const std::vector<std::shared_ptr<Installable>> & installables);
StorePath toStorePath(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
OperateOn operateOn,
std::shared_ptr<Installable> installable);
std::set<StorePath> toDerivations(
ref<Store> store,
const std::vector<std::shared_ptr<Installable>> & installables,
bool useDeriver = false);
BuiltPaths toBuiltPaths(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
OperateOn operateOn,
const std::vector<std::shared_ptr<Installable>> & installables);
/* Helper function to generate args that invoke $EDITOR on /* Helper function to generate args that invoke $EDITOR on
filename:lineno. */ filename:lineno. */
@ -252,7 +272,7 @@ struct MixProfile : virtual StoreCommand
/* If 'profile' is set, make it point at the store path produced /* If 'profile' is set, make it point at the store path produced
by 'buildables'. */ by 'buildables'. */
void updateProfile(const DerivedPathsWithHints & buildables); void updateProfile(const BuiltPaths & buildables);
}; };
struct MixDefaultProfile : MixProfile struct MixDefaultProfile : MixProfile
@ -58,9 +58,13 @@ MixFlakeOptions::MixFlakeOptions()
addFlag({ addFlag({
.longName = "no-registries", .longName = "no-registries",
.description = "Don't allow lookups in the flake registries.", .description =
"Don't allow lookups in the flake registries. This option is deprecated; use `--no-use-registries`.",
.category = category, .category = category,
.handler = {&lockFlags.useRegistries, false} .handler = {[&]() {
lockFlags.useRegistries = false;
warn("'--no-registries' is deprecated; use '--no-use-registries'");
}}
}); });
addFlag({ addFlag({
@ -171,14 +175,50 @@ Strings SourceExprCommand::getDefaultFlakeAttrPathPrefixes()
void SourceExprCommand::completeInstallable(std::string_view prefix) void SourceExprCommand::completeInstallable(std::string_view prefix)
{ {
if (file) return; // FIXME if (file) {
evalSettings.pureEval = false;
auto state = getEvalState();
Expr *e = state->parseExprFromFile(
resolveExprPath(state->checkSourcePath(lookupFileArg(*state, *file)))
);
completeFlakeRefWithFragment( Value root;
getEvalState(), state->eval(e, root);
lockFlags,
getDefaultFlakeAttrPathPrefixes(), auto autoArgs = getAutoArgs(*state);
getDefaultFlakeAttrPaths(),
prefix); std::string prefix_ = std::string(prefix);
auto sep = prefix_.rfind('.');
std::string searchWord;
if (sep != std::string::npos) {
searchWord = prefix_.substr(sep, std::string::npos);
prefix_ = prefix_.substr(0, sep);
} else {
searchWord = prefix_;
prefix_ = "";
}
Value &v1(*findAlongAttrPath(*state, prefix_, *autoArgs, root).first);
state->forceValue(v1);
Value v2;
state->autoCallFunction(*autoArgs, v1, v2);
if (v2.type() == nAttrs) {
for (auto & i : *v2.attrs) {
std::string name = i.name;
if (name.find(searchWord) == 0) {
completions->add(i.name);
}
}
}
} else {
completeFlakeRefWithFragment(
getEvalState(),
lockFlags,
getDefaultFlakeAttrPathPrefixes(),
getDefaultFlakeAttrPaths(),
prefix);
}
} }
void completeFlakeRefWithFragment( void completeFlakeRefWithFragment(
@ -249,19 +289,6 @@ void completeFlakeRefWithFragment(
completeFlakeRef(evalState->store, prefix); completeFlakeRef(evalState->store, prefix);
} }
ref<EvalState> EvalCommand::getEvalState()
{
if (!evalState)
evalState = std::make_shared<EvalState>(searchPath, getStore());
return ref<EvalState>(evalState);
}
EvalCommand::~EvalCommand()
{
if (evalState)
evalState->printStats();
}
void completeFlakeRef(ref<Store> store, std::string_view prefix) void completeFlakeRef(ref<Store> store, std::string_view prefix)
{ {
if (prefix == "") if (prefix == "")
@ -285,9 +312,9 @@ void completeFlakeRef(ref<Store> store, std::string_view prefix)
} }
} }
DerivedPathWithHints Installable::toDerivedPathWithHints() DerivedPath Installable::toDerivedPath()
{ {
auto buildables = toDerivedPathsWithHints(); auto buildables = toDerivedPaths();
if (buildables.size() != 1) if (buildables.size() != 1)
throw Error("installable '%s' evaluates to %d derivations, where only one is expected", what(), buildables.size()); throw Error("installable '%s' evaluates to %d derivations, where only one is expected", what(), buildables.size());
return std::move(buildables[0]); return std::move(buildables[0]);
@ -321,22 +348,19 @@ struct InstallableStorePath : Installable
std::string what() override { return store->printStorePath(storePath); } std::string what() override { return store->printStorePath(storePath); }
DerivedPathsWithHints toDerivedPathsWithHints() override DerivedPaths toDerivedPaths() override
{ {
if (storePath.isDerivation()) { if (storePath.isDerivation()) {
std::map<std::string, std::optional<StorePath>> outputs;
auto drv = store->readDerivation(storePath); auto drv = store->readDerivation(storePath);
for (auto & [name, output] : drv.outputsAndOptPaths(*store))
outputs.emplace(name, output.second);
return { return {
DerivedPathWithHints::Built { DerivedPath::Built {
.drvPath = storePath, .drvPath = storePath,
.outputs = std::move(outputs) .outputs = drv.outputNames(),
} }
}; };
} else { } else {
return { return {
DerivedPathWithHints::Opaque { DerivedPath::Opaque {
.path = storePath, .path = storePath,
} }
}; };
@ -349,22 +373,24 @@ struct InstallableStorePath : Installable
} }
}; };
DerivedPathsWithHints InstallableValue::toDerivedPathsWithHints() DerivedPaths InstallableValue::toDerivedPaths()
{ {
DerivedPathsWithHints res; DerivedPaths res;
std::map<StorePath, std::map<std::string, std::optional<StorePath>>> drvsToOutputs; std::map<StorePath, std::set<std::string>> drvsToOutputs;
RealisedPath::Set drvsToCopy;
// Group by derivation, helps with .all in particular // Group by derivation, helps with .all in particular
for (auto & drv : toDerivations()) { for (auto & drv : toDerivations()) {
auto outputName = drv.outputName; auto outputName = drv.outputName;
if (outputName == "") if (outputName == "")
throw Error("derivation '%s' lacks an 'outputName' attribute", state->store->printStorePath(drv.drvPath)); throw Error("derivation '%s' lacks an 'outputName' attribute", state->store->printStorePath(drv.drvPath));
drvsToOutputs[drv.drvPath].insert_or_assign(outputName, drv.outPath); drvsToOutputs[drv.drvPath].insert(outputName);
drvsToCopy.insert(drv.drvPath);
} }
for (auto & i : drvsToOutputs) for (auto & i : drvsToOutputs)
res.push_back(DerivedPathWithHints::Built { i.first, i.second }); res.push_back(DerivedPath::Built { i.first, i.second });
return res; return res;
} }
@ -503,7 +529,11 @@ std::tuple<std::string, FlakeRef, InstallableValue::DerivationInfo> InstallableF
auto root = cache->getRoot(); auto root = cache->getRoot();
for (auto & attrPath : getActualAttrPaths()) { for (auto & attrPath : getActualAttrPaths()) {
auto attr = root->findAlongAttrPath(parseAttrPath(*state, attrPath)); auto attr = root->findAlongAttrPath(
parseAttrPath(*state, attrPath),
true
);
if (!attr) continue; if (!attr) continue;
if (!attr->isDerivation()) if (!attr->isDerivation())
@ -572,10 +602,10 @@ InstallableFlake::getCursors(EvalState & state)
std::shared_ptr<flake::LockedFlake> InstallableFlake::getLockedFlake() const std::shared_ptr<flake::LockedFlake> InstallableFlake::getLockedFlake() const
{ {
flake::LockFlags lockFlagsApplyConfig = lockFlags;
lockFlagsApplyConfig.applyNixConfig = true;
if (!_lockedFlake) { if (!_lockedFlake) {
_lockedFlake = std::make_shared<flake::LockedFlake>(lockFlake(*state, flakeRef, lockFlags)); _lockedFlake = std::make_shared<flake::LockedFlake>(lockFlake(*state, flakeRef, lockFlagsApplyConfig));
_lockedFlake->flake.config.apply();
// FIXME: send new config to the daemon.
} }
return _lockedFlake; return _lockedFlake;
} }
@ -624,6 +654,17 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
for (auto & s : ss) { for (auto & s : ss) {
std::exception_ptr ex; std::exception_ptr ex;
if (s.find('/') != std::string::npos) {
try {
result.push_back(std::make_shared<InstallableStorePath>(store, store->followLinksToStorePath(s)));
continue;
} catch (BadStorePath &) {
} catch (...) {
if (!ex)
ex = std::current_exception();
}
}
try { try {
auto [flakeRef, fragment] = parseFlakeRefWithFragment(s, absPath(".")); auto [flakeRef, fragment] = parseFlakeRefWithFragment(s, absPath("."));
result.push_back(std::make_shared<InstallableFlake>( result.push_back(std::make_shared<InstallableFlake>(
@ -638,25 +679,7 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
ex = std::current_exception(); ex = std::current_exception();
} }
if (s.find('/') != std::string::npos) {
try {
result.push_back(std::make_shared<InstallableStorePath>(store, store->followLinksToStorePath(s)));
continue;
} catch (BadStorePath &) {
} catch (...) {
if (!ex)
ex = std::current_exception();
}
}
std::rethrow_exception(ex); std::rethrow_exception(ex);
/*
throw Error(
pathExists(s)
? "path '%s' is not a flake or a store path"
: "don't know how to handle argument '%s'", s);
*/
} }
} }
@ -671,109 +694,121 @@ std::shared_ptr<Installable> SourceExprCommand::parseInstallable(
return installables.front(); return installables.front();
} }
DerivedPathsWithHints build(ref<Store> store, Realise mode, BuiltPaths getBuiltPaths(ref<Store> evalStore, ref<Store> store, const DerivedPaths & hopefullyBuiltPaths)
std::vector<std::shared_ptr<Installable>> installables, BuildMode bMode)
{ {
if (mode == Realise::Nothing) BuiltPaths res;
settings.readOnlyMode = true; for (auto & b : hopefullyBuiltPaths)
std::visit(
DerivedPathsWithHints buildables; overloaded{
[&](DerivedPath::Opaque bo) {
std::vector<DerivedPath> pathsToBuild; res.push_back(BuiltPath::Opaque{bo.path});
for (auto & i : installables) {
for (auto & b : i->toDerivedPathsWithHints()) {
std::visit(overloaded {
[&](DerivedPathWithHints::Opaque bo) {
pathsToBuild.push_back(bo);
}, },
[&](DerivedPathWithHints::Built bfd) { [&](DerivedPath::Built bfd) {
StringSet outputNames; OutputPathMap outputs;
for (auto & output : bfd.outputs) auto drv = evalStore->readDerivation(bfd.drvPath);
outputNames.insert(output.first); auto outputHashes = staticOutputHashes(*evalStore, drv); // FIXME: expensive
pathsToBuild.push_back( auto drvOutputs = drv.outputsAndOptPaths(*store);
DerivedPath::Built{bfd.drvPath, outputNames});
},
}, b.raw());
buildables.push_back(std::move(b));
}
}
if (mode == Realise::Nothing)
printMissing(store, pathsToBuild, lvlError);
else if (mode == Realise::Outputs)
store->buildPaths(pathsToBuild, bMode);
return buildables;
}
std::set<RealisedPath> toRealisedPaths(
ref<Store> store,
Realise mode,
OperateOn operateOn,
std::vector<std::shared_ptr<Installable>> installables)
{
std::set<RealisedPath> res;
if (operateOn == OperateOn::Output) {
for (auto & b : build(store, mode, installables))
std::visit(overloaded {
[&](DerivedPathWithHints::Opaque bo) {
res.insert(bo.path);
},
[&](DerivedPathWithHints::Built bfd) {
auto drv = store->readDerivation(bfd.drvPath);
auto outputHashes = staticOutputHashes(*store, drv);
for (auto & output : bfd.outputs) { for (auto & output : bfd.outputs) {
if (settings.isExperimentalFeatureEnabled("ca-derivations")) { if (!outputHashes.count(output))
if (!outputHashes.count(output.first)) throw Error(
throw Error( "the derivation '%s' doesn't have an output named '%s'",
"the derivation '%s' doesn't have an output named '%s'", store->printStorePath(bfd.drvPath), output);
store->printStorePath(bfd.drvPath), if (settings.isExperimentalFeatureEnabled(
output.first); "ca-derivations")) {
auto outputId = DrvOutput{outputHashes.at(output.first), output.first}; auto outputId =
auto realisation = store->queryRealisation(outputId); DrvOutput{outputHashes.at(output), output};
auto realisation =
store->queryRealisation(outputId);
if (!realisation) if (!realisation)
throw Error("cannot operate on an output of unbuilt content-addresed derivation '%s'", outputId.to_string()); throw Error(
res.insert(RealisedPath{*realisation}); "cannot operate on an output of unbuilt "
} "content-addressed derivation '%s'",
else { outputId.to_string());
// If ca-derivations isn't enabled, behave as if outputs.insert_or_assign(
// all the paths are opaque to keep the default output, realisation->outPath);
// behavior } else {
assert(output.second); // If ca-derivations isn't enabled, assume that
res.insert(*output.second); // the output path is statically known.
assert(drvOutputs.count(output));
assert(drvOutputs.at(output).second);
outputs.insert_or_assign(
output, *drvOutputs.at(output).second);
} }
} }
res.push_back(BuiltPath::Built{bfd.drvPath, outputs});
}, },
}, b.raw()); },
} else { b.raw());
if (mode == Realise::Nothing)
settings.readOnlyMode = true;
for (auto & i : installables)
for (auto & b : i->toDerivedPathsWithHints())
if (auto bfd = std::get_if<DerivedPathWithHints::Built>(&b))
res.insert(bfd->drvPath);
}
return res; return res;
} }
StorePathSet toStorePaths(ref<Store> store, BuiltPaths build(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
const std::vector<std::shared_ptr<Installable>> & installables,
BuildMode bMode)
{
if (mode == Realise::Nothing)
settings.readOnlyMode = true;
std::vector<DerivedPath> pathsToBuild;
for (auto & i : installables) {
auto b = i->toDerivedPaths();
pathsToBuild.insert(pathsToBuild.end(), b.begin(), b.end());
}
if (mode == Realise::Nothing || mode == Realise::Derivation)
printMissing(store, pathsToBuild, lvlError);
else if (mode == Realise::Outputs)
store->buildPaths(pathsToBuild, bMode, evalStore);
return getBuiltPaths(evalStore, store, pathsToBuild);
}
BuiltPaths toBuiltPaths(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
OperateOn operateOn,
const std::vector<std::shared_ptr<Installable>> & installables)
{
if (operateOn == OperateOn::Output)
return build(evalStore, store, mode, installables);
else {
if (mode == Realise::Nothing)
settings.readOnlyMode = true;
BuiltPaths res;
for (auto & drvPath : toDerivations(store, installables, true))
res.push_back(BuiltPath::Opaque{drvPath});
return res;
}
}
StorePathSet toStorePaths(
ref<Store> evalStore,
ref<Store> store,
Realise mode, OperateOn operateOn, Realise mode, OperateOn operateOn,
std::vector<std::shared_ptr<Installable>> installables) const std::vector<std::shared_ptr<Installable>> & installables)
{ {
StorePathSet outPaths; StorePathSet outPaths;
for (auto & path : toRealisedPaths(store, mode, operateOn, installables)) for (auto & path : toBuiltPaths(evalStore, store, mode, operateOn, installables)) {
outPaths.insert(path.path()); auto thisOutPaths = path.outPaths();
outPaths.insert(thisOutPaths.begin(), thisOutPaths.end());
}
return outPaths; return outPaths;
} }
StorePath toStorePath(ref<Store> store, StorePath toStorePath(
ref<Store> evalStore,
ref<Store> store,
Realise mode, OperateOn operateOn, Realise mode, OperateOn operateOn,
std::shared_ptr<Installable> installable) std::shared_ptr<Installable> installable)
{ {
auto paths = toStorePaths(store, mode, operateOn, {installable}); auto paths = toStorePaths(evalStore, store, mode, operateOn, {installable});
if (paths.size() != 1) if (paths.size() != 1)
throw Error("argument '%s' should evaluate to one store path", installable->what()); throw Error("argument '%s' should evaluate to one store path", installable->what());
@ -781,15 +816,17 @@ StorePath toStorePath(ref<Store> store,
return *paths.begin(); return *paths.begin();
} }
StorePathSet toDerivations(ref<Store> store, StorePathSet toDerivations(
std::vector<std::shared_ptr<Installable>> installables, bool useDeriver) ref<Store> store,
const std::vector<std::shared_ptr<Installable>> & installables,
bool useDeriver)
{ {
StorePathSet drvPaths; StorePathSet drvPaths;
for (auto & i : installables) for (auto & i : installables)
for (auto & b : i->toDerivedPathsWithHints()) for (auto & b : i->toDerivedPaths())
std::visit(overloaded { std::visit(overloaded {
[&](DerivedPathWithHints::Opaque bo) { [&](DerivedPath::Opaque bo) {
if (!useDeriver) if (!useDeriver)
throw Error("argument '%s' did not evaluate to a derivation", i->what()); throw Error("argument '%s' did not evaluate to a derivation", i->what());
auto derivers = store->queryValidDerivers(bo.path); auto derivers = store->queryValidDerivers(bo.path);
@ -798,7 +835,7 @@ StorePathSet toDerivations(ref<Store> store,
// FIXME: use all derivers? // FIXME: use all derivers?
drvPaths.insert(*derivers.begin()); drvPaths.insert(*derivers.begin());
}, },
[&](DerivedPathWithHints::Built bfd) { [&](DerivedPath::Built bfd) {
drvPaths.insert(bfd.drvPath); drvPaths.insert(bfd.drvPath);
}, },
}, b.raw()); }, b.raw());
@ -23,17 +23,23 @@ struct App
// FIXME: add args, sandbox settings, metadata, ... // FIXME: add args, sandbox settings, metadata, ...
}; };
struct UnresolvedApp
{
App unresolved;
App resolve(ref<Store> evalStore, ref<Store> store);
};
struct Installable struct Installable
{ {
virtual ~Installable() { } virtual ~Installable() { }
virtual std::string what() = 0; virtual std::string what() = 0;
virtual DerivedPathsWithHints toDerivedPathsWithHints() = 0; virtual DerivedPaths toDerivedPaths() = 0;
DerivedPathWithHints toDerivedPathWithHints(); DerivedPath toDerivedPath();
App toApp(EvalState & state); UnresolvedApp toApp(EvalState & state);
virtual std::pair<Value *, Pos> toValue(EvalState & state) virtual std::pair<Value *, Pos> toValue(EvalState & state)
{ {
@ -74,7 +80,7 @@ struct InstallableValue : Installable
virtual std::vector<DerivationInfo> toDerivations() = 0; virtual std::vector<DerivationInfo> toDerivations() = 0;
DerivedPathsWithHints toDerivedPathsWithHints() override; DerivedPaths toDerivedPaths() override;
}; };
struct InstallableFlake : InstallableValue struct InstallableFlake : InstallableValue
@ -8,8 +8,8 @@ libcmd_SOURCES := $(wildcard $(d)/*.cc)
libcmd_CXXFLAGS += -I src/libutil -I src/libstore -I src/libexpr -I src/libmain -I src/libfetchers libcmd_CXXFLAGS += -I src/libutil -I src/libstore -I src/libexpr -I src/libmain -I src/libfetchers
libcmd_LDFLAGS = -llowdown libcmd_LDFLAGS += -llowdown -pthread
libcmd_LIBS = libstore libutil libexpr libmain libfetchers libcmd_LIBS = libstore libutil libexpr libmain libfetchers
$(eval $(call install-file-in, $(d)/nix-cmd.pc, $(prefix)/lib/pkgconfig, 0644)) $(eval $(call install-file-in, $(d)/nix-cmd.pc, $(libdir)/pkgconfig, 0644))
@ -12,7 +12,7 @@ std::string renderMarkdownToTerminal(std::string_view markdown)
struct lowdown_opts opts { struct lowdown_opts opts {
.type = LOWDOWN_TERM, .type = LOWDOWN_TERM,
.maxdepth = 20, .maxdepth = 20,
.cols = std::min(getWindowSize().second, (unsigned short) 80), .cols = std::max(getWindowSize().second, (unsigned short) 80),
.hmargin = 0, .hmargin = 0,
.vmargin = 0, .vmargin = 0,
.feat = LOWDOWN_COMMONMARK | LOWDOWN_FENCED | LOWDOWN_DEFLIST | LOWDOWN_TABLES, .feat = LOWDOWN_COMMONMARK | LOWDOWN_FENCED | LOWDOWN_DEFLIST | LOWDOWN_TABLES,
@ -25,7 +25,7 @@ std::string renderMarkdownToTerminal(std::string_view markdown)
Finally freeDoc([&]() { lowdown_doc_free(doc); }); Finally freeDoc([&]() { lowdown_doc_free(doc); });
size_t maxn = 0; size_t maxn = 0;
auto node = lowdown_doc_parse(doc, &maxn, markdown.data(), markdown.size()); auto node = lowdown_doc_parse(doc, &maxn, markdown.data(), markdown.size(), nullptr);
if (!node) if (!node)
throw Error("cannot parse Markdown document"); throw Error("cannot parse Markdown document");
Finally freeNode([&]() { lowdown_node_free(node); }); Finally freeNode([&]() { lowdown_node_free(node); });
@ -40,11 +40,11 @@ std::string renderMarkdownToTerminal(std::string_view markdown)
throw Error("cannot allocate Markdown output buffer"); throw Error("cannot allocate Markdown output buffer");
Finally freeBuffer([&]() { lowdown_buf_free(buf); }); Finally freeBuffer([&]() { lowdown_buf_free(buf); });
int rndr_res = lowdown_term_rndr(buf, nullptr, renderer, node); int rndr_res = lowdown_term_rndr(buf, renderer, node);
if (!rndr_res) if (!rndr_res)
throw Error("allocation error while rendering Markdown"); throw Error("allocation error while rendering Markdown");
return std::string(buf->data, buf->size); return filterANSIEscapes(std::string(buf->data, buf->size), !shouldANSI());
} }
} }
@ -19,7 +19,7 @@ static Strings parseAttrPath(std::string_view s)
++i; ++i;
while (1) { while (1) {
if (i == s.end()) if (i == s.end())
throw Error("missing closing quote in selection path '%1%'", s); throw ParseError("missing closing quote in selection path '%1%'", s);
if (*i == '"') break; if (*i == '"') break;
cur.push_back(*i++); cur.push_back(*i++);
} }
@ -100,7 +100,7 @@ std::pair<Value *, Pos> findAlongAttrPath(EvalState & state, const string & attr
} }
Pos findDerivationFilename(EvalState & state, Value & v, std::string what) Pos findPackageFilename(EvalState & state, Value & v, std::string what)
{ {
Value * v2; Value * v2;
try { try {
@ -116,14 +116,14 @@ Pos findDerivationFilename(EvalState & state, Value & v, std::string what)
auto colon = pos.rfind(':'); auto colon = pos.rfind(':');
if (colon == std::string::npos) if (colon == std::string::npos)
throw Error("cannot parse meta.position attribute '%s'", pos); throw ParseError("cannot parse meta.position attribute '%s'", pos);
std::string filename(pos, 0, colon); std::string filename(pos, 0, colon);
unsigned int lineno; unsigned int lineno;
try { try {
lineno = std::stoi(std::string(pos, colon + 1)); lineno = std::stoi(std::string(pos, colon + 1));
} catch (std::invalid_argument & e) { } catch (std::invalid_argument & e) {
throw Error("cannot parse line number '%s'", pos); throw ParseError("cannot parse line number '%s'", pos);
} }
Symbol file = state.symbols.create(filename); Symbol file = state.symbols.create(filename);
@ -14,7 +14,7 @@ std::pair<Value *, Pos> findAlongAttrPath(EvalState & state, const string & attr
Bindings & autoArgs, Value & vIn); Bindings & autoArgs, Value & vIn);
/* Heuristic to find the filename and lineno or a nix value. */ /* Heuristic to find the filename and lineno or a nix value. */
Pos findDerivationFilename(EvalState & state, Value & v, std::string what); Pos findPackageFilename(EvalState & state, Value & v, std::string what);
std::vector<Symbol> parseAttrPath(EvalState & state, std::string_view s); std::vector<Symbol> parseAttrPath(EvalState & state, std::string_view s);
@ -17,8 +17,8 @@ struct Attr
{ {
Symbol name; Symbol name;
Value * value; Value * value;
Pos * pos; ptr<Pos> pos;
Attr(Symbol name, Value * value, Pos * pos = &noPos) Attr(Symbol name, Value * value, ptr<Pos> pos = ptr(&noPos))
: name(name), value(value), pos(pos) { }; : name(name), value(value), pos(pos) { };
Attr() : pos(&noPos) { }; Attr() : pos(&noPos) { };
bool operator < (const Attr & a) const bool operator < (const Attr & a) const
@ -35,12 +35,13 @@ class Bindings
{ {
public: public:
typedef uint32_t size_t; typedef uint32_t size_t;
ptr<Pos> pos;
private: private:
size_t size_, capacity_; size_t size_, capacity_;
Attr attrs[0]; Attr attrs[0];
Bindings(size_t capacity) : size_(0), capacity_(capacity) { } Bindings(size_t capacity) : pos(&noPos), size_(0), capacity_(capacity) { }
Bindings(const Bindings & bindings) = delete; Bindings(const Bindings & bindings) = delete;
public: public:
@ -61,6 +61,14 @@ MixEvalArgs::MixEvalArgs()
fetchers::overrideRegistry(from.input, to.input, extraAttrs); fetchers::overrideRegistry(from.input, to.input, extraAttrs);
}} }}
}); });
addFlag({
.longName = "eval-store",
.description = "The Nix store to use for evaluations.",
.category = category,
.labels = {"store-url"},
.handler = {&evalStoreUrl},
});
} }
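
Because `--eval-store` is user-facing, a hedged usage sketch may help; it assumes the flakes feature is enabled, and the store choices below are illustrative only:

# Evaluate against the automatically chosen local store while the build
# itself goes through the daemon store.
nix build nixpkgs#hello --eval-store auto --store daemon
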
Bindings * MixEvalArgs::getAutoArgs(EvalState & state) Bindings * MixEvalArgs::getAutoArgs(EvalState & state)
@ -16,8 +16,9 @@ struct MixEvalArgs : virtual Args
Strings searchPath; Strings searchPath;
private: std::optional<std::string> evalStoreUrl;
private:
std::map<std::string, std::string> autoArgs; std::map<std::string, std::string> autoArgs;
}; };
@ -486,11 +486,11 @@ std::shared_ptr<AttrCursor> AttrCursor::getAttr(std::string_view name)
return getAttr(root->state.symbols.create(name)); return getAttr(root->state.symbols.create(name));
} }
std::shared_ptr<AttrCursor> AttrCursor::findAlongAttrPath(const std::vector<Symbol> & attrPath) std::shared_ptr<AttrCursor> AttrCursor::findAlongAttrPath(const std::vector<Symbol> & attrPath, bool force)
{ {
auto res = shared_from_this(); auto res = shared_from_this();
for (auto & attr : attrPath) { for (auto & attr : attrPath) {
res = res->maybeGetAttr(attr); res = res->maybeGetAttr(attr, force);
if (!res) return {}; if (!res) return {};
} }
return res; return res;
@ -102,7 +102,7 @@ public:
std::shared_ptr<AttrCursor> getAttr(std::string_view name); std::shared_ptr<AttrCursor> getAttr(std::string_view name);
std::shared_ptr<AttrCursor> findAlongAttrPath(const std::vector<Symbol> & attrPath); std::shared_ptr<AttrCursor> findAlongAttrPath(const std::vector<Symbol> & attrPath, bool force = false);
std::string getString(); std::string getString();
@ -64,7 +64,11 @@ static char * dupStringWithLen(const char * s, size_t size)
RootValue allocRootValue(Value * v) RootValue allocRootValue(Value * v)
{ {
#if HAVE_BOEHMGC
return std::allocate_shared<Value *>(traceable_allocator<Value *>(), v); return std::allocate_shared<Value *>(traceable_allocator<Value *>(), v);
#else
return std::make_shared<Value *>(v);
#endif
} }
@ -201,6 +205,15 @@ string showType(const Value & v)
} }
} }
Pos Value::determinePos(const Pos &pos) const
{
switch (internalType) {
case tAttrs: return *attrs->pos;
case tLambda: return lambda.fun->pos;
case tApp: return app.left->determinePos(pos);
default: return pos;
}
}
bool Value::isTrivial() const bool Value::isTrivial() const
{ {
@ -224,22 +237,34 @@ static void * oomHandler(size_t requested)
} }
class BoehmGCStackAllocator : public StackAllocator { class BoehmGCStackAllocator : public StackAllocator {
boost::coroutines2::protected_fixedsize_stack stack { boost::coroutines2::protected_fixedsize_stack stack {
// We allocate 8 MB, the default max stack size on NixOS. // We allocate 8 MB, the default max stack size on NixOS.
// A smaller stack might be quicker to allocate but reduces the stack // A smaller stack might be quicker to allocate but reduces the stack
// depth available for source filter expressions etc. // depth available for source filter expressions etc.
std::max(boost::context::stack_traits::default_size(), static_cast<std::size_t>(8 * 1024 * 1024)) std::max(boost::context::stack_traits::default_size(), static_cast<std::size_t>(8 * 1024 * 1024))
}; };
// This is specific to boost::coroutines2::protected_fixedsize_stack.
// The stack protection page is included in sctx.size, so we have to
// subtract one page size from the stack size.
std::size_t pfss_usable_stack_size(boost::context::stack_context &sctx) {
return sctx.size - boost::context::stack_traits::page_size();
}
public: public:
boost::context::stack_context allocate() override { boost::context::stack_context allocate() override {
auto sctx = stack.allocate(); auto sctx = stack.allocate();
GC_add_roots(static_cast<char *>(sctx.sp) - sctx.size, sctx.sp);
// Stacks generally start at a high address and grow to lower addresses.
// Architectures that do the opposite are rare; in fact so rare that
// boost_routine does not implement it.
// So we subtract the stack size.
GC_add_roots(static_cast<char *>(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp);
return sctx; return sctx;
} }
void deallocate(boost::context::stack_context sctx) override { void deallocate(boost::context::stack_context sctx) override {
GC_remove_roots(static_cast<char *>(sctx.sp) - sctx.size, sctx.sp); GC_remove_roots(static_cast<char *>(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp);
stack.deallocate(sctx); stack.deallocate(sctx);
} }
@ -353,7 +378,10 @@ static Strings parseNixPath(const string & s)
} }
EvalState::EvalState(const Strings & _searchPath, ref<Store> store) EvalState::EvalState(
const Strings & _searchPath,
ref<Store> store,
std::shared_ptr<Store> buildStore)
: sWith(symbols.create("<with>")) : sWith(symbols.create("<with>"))
, sOutPath(symbols.create("outPath")) , sOutPath(symbols.create("outPath"))
, sDrvPath(symbols.create("drvPath")) , sDrvPath(symbols.create("drvPath"))
@ -386,6 +414,7 @@ EvalState::EvalState(const Strings & _searchPath, ref<Store> store)
, sEpsilon(symbols.create("")) , sEpsilon(symbols.create(""))
, repair(NoRepair) , repair(NoRepair)
, store(store) , store(store)
, buildStore(buildStore ? buildStore : store)
, regexCache(makeRegexCache()) , regexCache(makeRegexCache())
, baseEnv(allocEnv(128)) , baseEnv(allocEnv(128))
, staticBaseEnv(false, 0) , staticBaseEnv(false, 0)
@ -436,6 +465,23 @@ EvalState::~EvalState()
} }
void EvalState::requireExperimentalFeatureOnEvaluation(
const std::string & feature,
const std::string_view fName,
const Pos & pos)
{
if (!settings.isExperimentalFeatureEnabled(feature)) {
throw EvalError({
.msg = hintfmt(
"Cannot call '%2%' because experimental Nix feature '%1%' is disabled. You can enable it via '--extra-experimental-features %1%'.",
feature,
fName
),
.errPos = pos
});
}
}
Path EvalState::checkSourcePath(const Path & path_) Path EvalState::checkSourcePath(const Path & path_)
{ {
if (!allowedPaths) return path_; if (!allowedPaths) return path_;
@ -741,7 +787,7 @@ inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval)
} }
Bindings::iterator j = env->values[0]->attrs->find(var.name); Bindings::iterator j = env->values[0]->attrs->find(var.name);
if (j != env->values[0]->attrs->end()) { if (j != env->values[0]->attrs->end()) {
if (countCalls && j->pos) attrSelects[*j->pos]++; if (countCalls) attrSelects[*j->pos]++;
return j->value; return j->value;
} }
if (!env->prevWith) if (!env->prevWith)
@ -751,18 +797,10 @@ inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval)
} }
std::atomic<uint64_t> nrValuesFreed{0};
void finalizeValue(void * obj, void * data)
{
nrValuesFreed++;
}
Value * EvalState::allocValue() Value * EvalState::allocValue()
{ {
nrValues++; nrValues++;
auto v = (Value *) allocBytes(sizeof(Value)); auto v = (Value *) allocBytes(sizeof(Value));
//GC_register_finalizer_no_order(v, finalizeValue, nullptr, nullptr, nullptr);
return v; return v;
} }
@ -804,9 +842,9 @@ void EvalState::mkThunk_(Value & v, Expr * expr)
} }
void EvalState::mkPos(Value & v, Pos * pos) void EvalState::mkPos(Value & v, ptr<Pos> pos)
{ {
if (pos && pos->file.set()) { if (pos->file.set()) {
mkAttrs(v, 3); mkAttrs(v, 3);
mkString(*allocAttr(v, sFile), pos->file); mkString(*allocAttr(v, sFile), pos->file);
mkInt(*allocAttr(v, sLine), pos->line); mkInt(*allocAttr(v, sLine), pos->line);
@ -829,39 +867,37 @@ Value * Expr::maybeThunk(EvalState & state, Env & env)
} }
unsigned long nrAvoided = 0;
Value * ExprVar::maybeThunk(EvalState & state, Env & env) Value * ExprVar::maybeThunk(EvalState & state, Env & env)
{ {
Value * v = state.lookupVar(&env, *this, true); Value * v = state.lookupVar(&env, *this, true);
/* The value might not be initialised in the environment yet. /* The value might not be initialised in the environment yet.
In that case, ignore it. */ In that case, ignore it. */
if (v) { nrAvoided++; return v; } if (v) { state.nrAvoided++; return v; }
return Expr::maybeThunk(state, env); return Expr::maybeThunk(state, env);
} }
Value * ExprString::maybeThunk(EvalState & state, Env & env) Value * ExprString::maybeThunk(EvalState & state, Env & env)
{ {
nrAvoided++; state.nrAvoided++;
return &v; return &v;
} }
Value * ExprInt::maybeThunk(EvalState & state, Env & env) Value * ExprInt::maybeThunk(EvalState & state, Env & env)
{ {
nrAvoided++; state.nrAvoided++;
return &v; return &v;
} }
Value * ExprFloat::maybeThunk(EvalState & state, Env & env) Value * ExprFloat::maybeThunk(EvalState & state, Env & env)
{ {
nrAvoided++; state.nrAvoided++;
return &v; return &v;
} }
Value * ExprPath::maybeThunk(EvalState & state, Env & env) Value * ExprPath::maybeThunk(EvalState & state, Env & env)
{ {
nrAvoided++; state.nrAvoided++;
return &v; return &v;
} }
@ -876,38 +912,23 @@ void EvalState::evalFile(const Path & path_, Value & v, bool mustBeTrivial)
return; return;
} }
Path path2 = resolveExprPath(path); Path resolvedPath = resolveExprPath(path);
if ((i = fileEvalCache.find(path2)) != fileEvalCache.end()) { if ((i = fileEvalCache.find(resolvedPath)) != fileEvalCache.end()) {
v = i->second; v = i->second;
return; return;
} }
printTalkative("evaluating file '%1%'", path2); printTalkative("evaluating file '%1%'", resolvedPath);
Expr * e = nullptr; Expr * e = nullptr;
auto j = fileParseCache.find(path2); auto j = fileParseCache.find(resolvedPath);
if (j != fileParseCache.end()) if (j != fileParseCache.end())
e = j->second; e = j->second;
if (!e) if (!e)
e = parseExprFromFile(checkSourcePath(path2)); e = parseExprFromFile(checkSourcePath(resolvedPath));
fileParseCache[path2] = e; cacheFile(path, resolvedPath, e, v, mustBeTrivial);
try {
// Enforce that 'flake.nix' is a direct attrset, not a
// computation.
if (mustBeTrivial &&
!(dynamic_cast<ExprAttrs *>(e)))
throw Error("file '%s' must be an attribute set", path);
eval(e, v);
} catch (Error & e) {
addErrorTrace(e, "while evaluating the file '%1%':", path2);
throw;
}
fileEvalCache[path2] = v;
if (path != path2) fileEvalCache[path] = v;
} }
@ -918,6 +939,32 @@ void EvalState::resetFileCache()
} }
void EvalState::cacheFile(
const Path & path,
const Path & resolvedPath,
Expr * e,
Value & v,
bool mustBeTrivial)
{
fileParseCache[resolvedPath] = e;
try {
// Enforce that 'flake.nix' is a direct attrset, not a
// computation.
if (mustBeTrivial &&
!(dynamic_cast<ExprAttrs *>(e)))
throw EvalError("file '%s' must be an attribute set", path);
eval(e, v);
} catch (Error & e) {
addErrorTrace(e, "while evaluating the file '%1%':", resolvedPath);
throw;
}
fileEvalCache[resolvedPath] = v;
if (path != resolvedPath) fileEvalCache[path] = v;
}
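A quick usage sketch of the triviality check kept by cacheFile above: a flake.nix whose top level is not a literal attribute set is still rejected. The `mk` helper below is hypothetical.

    # Rejected with "file '...' must be an attribute set", because the
    # top-level expression is a `let`, not a direct attrset:
    let mk = description: { inherit description; outputs = _: { }; };
    in mk "example flake"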
void EvalState::eval(Expr * e, Value & v) void EvalState::eval(Expr * e, Value & v)
{ {
e->eval(*this, baseEnv, v); e->eval(*this, baseEnv, v);
@ -1008,7 +1055,7 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
} else } else
vAttr = i.second.e->maybeThunk(state, i.second.inherited ? env : env2); vAttr = i.second.e->maybeThunk(state, i.second.inherited ? env : env2);
env2.values[displ++] = vAttr; env2.values[displ++] = vAttr;
v.attrs->push_back(Attr(i.first, vAttr, &i.second.pos)); v.attrs->push_back(Attr(i.first, vAttr, ptr(&i.second.pos)));
} }
/* If the rec contains an attribute called `__overrides', then /* If the rec contains an attribute called `__overrides', then
@ -1040,7 +1087,7 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
else else
for (auto & i : attrs) for (auto & i : attrs)
v.attrs->push_back(Attr(i.first, i.second.e->maybeThunk(state, env), &i.second.pos)); v.attrs->push_back(Attr(i.first, i.second.e->maybeThunk(state, env), ptr(&i.second.pos)));
/* Dynamic attrs apply *after* rec and __overrides. */ /* Dynamic attrs apply *after* rec and __overrides. */
for (auto & i : dynamicAttrs) { for (auto & i : dynamicAttrs) {
@ -1057,9 +1104,11 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
i.valueExpr->setName(nameSym); i.valueExpr->setName(nameSym);
/* Keep sorted order so find can catch duplicates */ /* Keep sorted order so find can catch duplicates */
v.attrs->push_back(Attr(nameSym, i.valueExpr->maybeThunk(state, *dynamicEnv), &i.pos)); v.attrs->push_back(Attr(nameSym, i.valueExpr->maybeThunk(state, *dynamicEnv), ptr(&i.pos)));
v.attrs->sort(); // FIXME: inefficient v.attrs->sort(); // FIXME: inefficient
} }
v.attrs->pos = ptr(&pos);
} }
@ -1114,12 +1163,10 @@ static string showAttrPath(EvalState & state, Env & env, const AttrPath & attrPa
} }
unsigned long nrLookups = 0;
void ExprSelect::eval(EvalState & state, Env & env, Value & v) void ExprSelect::eval(EvalState & state, Env & env, Value & v)
{ {
Value vTmp; Value vTmp;
Pos * pos2 = 0; ptr<Pos> pos2(&noPos);
Value * vAttrs = &vTmp; Value * vAttrs = &vTmp;
e->eval(state, env, vTmp); e->eval(state, env, vTmp);
@ -1127,7 +1174,7 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v)
try { try {
for (auto & i : attrPath) { for (auto & i : attrPath) {
nrLookups++; state.nrLookups++;
Bindings::iterator j; Bindings::iterator j;
Symbol name = getName(i, state, env); Symbol name = getName(i, state, env);
if (def) { if (def) {
@ -1145,13 +1192,13 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v)
} }
vAttrs = j->value; vAttrs = j->value;
pos2 = j->pos; pos2 = j->pos;
if (state.countCalls && pos2) state.attrSelects[*pos2]++; if (state.countCalls) state.attrSelects[*pos2]++;
} }
state.forceValue(*vAttrs, ( pos2 != NULL ? *pos2 : this->pos ) ); state.forceValue(*vAttrs, (*pos2 != noPos ? *pos2 : this->pos ) );
} catch (Error & e) { } catch (Error & e) {
if (pos2 && pos2->file != state.sDerivationNix) if (*pos2 != noPos && pos2->file != state.sDerivationNix)
addErrorTrace(e, *pos2, "while evaluating the attribute '%1%'", addErrorTrace(e, *pos2, "while evaluating the attribute '%1%'",
showAttrPath(state, env, attrPath)); showAttrPath(state, env, attrPath));
throw; throw;
@ -1557,7 +1604,6 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v)
and none of the strings are allowed to have contexts. */ and none of the strings are allowed to have contexts. */
if (first) { if (first) {
firstType = vTmp.type(); firstType = vTmp.type();
first = false;
} }
if (firstType == nInt) { if (firstType == nInt) {
@ -1578,7 +1624,12 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v)
} else } else
throwEvalError(pos, "cannot add %1% to a float", showType(vTmp)); throwEvalError(pos, "cannot add %1% to a float", showType(vTmp));
} else } else
s << state.coerceToString(pos, vTmp, context, false, firstType == nString); /* skip canonicalisation of the first path; it would only be
non-canonical in the first place if it comes from a ./${foo}-style
path */
s << state.coerceToString(pos, vTmp, context, false, firstType == nString, !first);
first = false;
} }
if (firstType == nInt) if (firstType == nInt)
@ -1597,7 +1648,7 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v)
void ExprPos::eval(EvalState & state, Env & env, Value & v) void ExprPos::eval(EvalState & state, Env & env, Value & v)
{ {
state.mkPos(v, &pos); state.mkPos(v, ptr(&pos));
} }
@ -1767,7 +1818,7 @@ std::optional<string> EvalState::tryAttrsToString(const Pos & pos, Value & v,
} }
string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context, string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context,
bool coerceMore, bool copyToStore) bool coerceMore, bool copyToStore, bool canonicalizePath)
{ {
forceValue(v, pos); forceValue(v, pos);
@ -1779,7 +1830,7 @@ string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context,
} }
if (v.type() == nPath) { if (v.type() == nPath) {
Path path(canonPath(v.path)); Path path(canonicalizePath ? canonPath(v.path) : v.path);
return copyToStore ? copyPathToStore(context, path) : path; return copyToStore ? copyPathToStore(context, path) : path;
} }
@ -2091,9 +2142,12 @@ Strings EvalSettings::getDefaultNixPath()
} }
}; };
add(getHome() + "/.nix-defexpr/channels"); if (!evalSettings.restrictEval && !evalSettings.pureEval) {
add(settings.nixStateDir + "/profiles/per-user/root/channels/nixpkgs", "nixpkgs"); add(getHome() + "/.nix-defexpr/channels");
add(settings.nixStateDir + "/profiles/per-user/root/channels"); add(settings.nixStateDir + "/profiles/per-user/root/channels/nixpkgs", "nixpkgs");
add(settings.nixStateDir + "/profiles/per-user/root/channels");
}
return res; return res;
} }
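A hedged illustration of the guard added above: under restrict-eval or pure-eval the home and per-user channel entries are no longer part of the default search path, so an angle-bracket lookup only succeeds when NIX_PATH or -I supplies an entry.

    # Roughly what <nixpkgs> desugars to; with pure/restricted evaluation the
    # default channel entries are absent, so this needs an explicit
    # NIX_PATH / -I entry for "nixpkgs":
    builtins.findFile builtins.nixPath "nixpkgs"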

View file

@ -94,8 +94,14 @@ public:
Value vEmptySet; Value vEmptySet;
/* Store used to materialise .drv files. */
const ref<Store> store; const ref<Store> store;
/* Store used to build stuff. */
const ref<Store> buildStore;
RootValue vCallFlake = nullptr;
RootValue vImportedDrvToDerivation = nullptr;
private: private:
SrcToStore srcToStore; SrcToStore srcToStore;
@ -128,9 +134,18 @@ private:
public: public:
EvalState(const Strings & _searchPath, ref<Store> store); EvalState(
const Strings & _searchPath,
ref<Store> store,
std::shared_ptr<Store> buildStore = nullptr);
~EvalState(); ~EvalState();
void requireExperimentalFeatureOnEvaluation(
const std::string & feature,
const std::string_view fName,
const Pos & pos
);
void addToSearchPath(const string & s); void addToSearchPath(const string & s);
SearchPath getSearchPath() { return searchPath; } SearchPath getSearchPath() { return searchPath; }
@ -163,6 +178,14 @@ public:
trivial (i.e. doesn't require arbitrary computation). */ trivial (i.e. doesn't require arbitrary computation). */
void evalFile(const Path & path, Value & v, bool mustBeTrivial = false); void evalFile(const Path & path, Value & v, bool mustBeTrivial = false);
/* Like `cacheFile`, but with an already parsed expression. */
void cacheFile(
const Path & path,
const Path & resolvedPath,
Expr * e,
Value & v,
bool mustBeTrivial = false);
void resetFileCache(); void resetFileCache();
/* Look up a file in the search path. */ /* Look up a file in the search path. */
@ -217,7 +240,8 @@ public:
booleans and lists to a string. If `copyToStore' is set, booleans and lists to a string. If `copyToStore' is set,
referenced paths are copied to the Nix store as a side effect. */ referenced paths are copied to the Nix store as a side effect. */
string coerceToString(const Pos & pos, Value & v, PathSet & context, string coerceToString(const Pos & pos, Value & v, PathSet & context,
bool coerceMore = false, bool copyToStore = true); bool coerceMore = false, bool copyToStore = true,
bool canonicalizePath = true);
string copyPathToStore(PathSet & context, const Path & path); string copyPathToStore(PathSet & context, const Path & path);
@ -301,7 +325,7 @@ public:
void mkList(Value & v, size_t length); void mkList(Value & v, size_t length);
void mkAttrs(Value & v, size_t capacity); void mkAttrs(Value & v, size_t capacity);
void mkThunk_(Value & v, Expr * expr); void mkThunk_(Value & v, Expr * expr);
void mkPos(Value & v, Pos * pos); void mkPos(Value & v, ptr<Pos> pos);
void concatLists(Value & v, size_t nrLists, Value * * lists, const Pos & pos); void concatLists(Value & v, size_t nrLists, Value * * lists, const Pos & pos);
@ -316,8 +340,10 @@ private:
unsigned long nrValuesInEnvs = 0; unsigned long nrValuesInEnvs = 0;
unsigned long nrValues = 0; unsigned long nrValues = 0;
unsigned long nrListElems = 0; unsigned long nrListElems = 0;
unsigned long nrLookups = 0;
unsigned long nrAttrsets = 0; unsigned long nrAttrsets = 0;
unsigned long nrAttrsInAttrsets = 0; unsigned long nrAttrsInAttrsets = 0;
unsigned long nrAvoided = 0;
unsigned long nrOpUpdates = 0; unsigned long nrOpUpdates = 0;
unsigned long nrOpUpdateValuesCopied = 0; unsigned long nrOpUpdateValuesCopied = 0;
unsigned long nrListConcats = 0; unsigned long nrListConcats = 0;
@ -339,6 +365,11 @@ private:
friend struct ExprOpUpdate; friend struct ExprOpUpdate;
friend struct ExprOpConcatLists; friend struct ExprOpConcatLists;
friend struct ExprVar;
friend struct ExprString;
friend struct ExprInt;
friend struct ExprFloat;
friend struct ExprPath;
friend struct ExprSelect; friend struct ExprSelect;
friend void prim_getAttr(EvalState & state, const Pos & pos, Value * * args, Value & v); friend void prim_getAttr(EvalState & state, const Pos & pos, Value * * args, Value & v);
friend void prim_match(EvalState & state, const Pos & pos, Value * * args, Value & v); friend void prim_match(EvalState & state, const Pos & pos, Value * * args, Value & v);

View file

@ -22,12 +22,14 @@ static TrustedList readTrustedList()
static void writeTrustedList(const TrustedList & trustedList) static void writeTrustedList(const TrustedList & trustedList)
{ {
writeFile(trustedListPath(), nlohmann::json(trustedList).dump()); auto path = trustedListPath();
createDirs(dirOf(path));
writeFile(path, nlohmann::json(trustedList).dump());
} }
void ConfigFile::apply() void ConfigFile::apply()
{ {
std::set<std::string> whitelist{"bash-prompt", "bash-prompt-suffix"}; std::set<std::string> whitelist{"bash-prompt", "bash-prompt-suffix", "flake-registry"};
for (auto & [name, value] : settings) { for (auto & [name, value] : settings) {
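Because 'flake-registry' is now in the whitelist above, a flake can set it from its nixConfig attribute; a minimal sketch, with a placeholder registry URL.

    # Hypothetical flake.nix using the newly whitelisted setting:
    {
      nixConfig.flake-registry = "https://example.org/flake-registry.json";
      outputs = { self }: { };
    }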

View file

@ -1,4 +1,5 @@
#include "flake.hh" #include "flake.hh"
#include "eval.hh"
#include "lockfile.hh" #include "lockfile.hh"
#include "primops.hh" #include "primops.hh"
#include "eval-inline.hh" #include "eval-inline.hh"
@ -88,10 +89,12 @@ static void expectType(EvalState & state, ValueType type,
} }
static std::map<FlakeId, FlakeInput> parseFlakeInputs( static std::map<FlakeId, FlakeInput> parseFlakeInputs(
EvalState & state, Value * value, const Pos & pos); EvalState & state, Value * value, const Pos & pos,
const std::optional<Path> & baseDir);
static FlakeInput parseFlakeInput(EvalState & state, static FlakeInput parseFlakeInput(EvalState & state,
const std::string & inputName, Value * value, const Pos & pos) const std::string & inputName, Value * value, const Pos & pos,
const std::optional<Path> & baseDir)
{ {
expectType(state, nAttrs, *value, pos); expectType(state, nAttrs, *value, pos);
@ -115,7 +118,7 @@ static FlakeInput parseFlakeInput(EvalState & state,
expectType(state, nBool, *attr.value, *attr.pos); expectType(state, nBool, *attr.value, *attr.pos);
input.isFlake = attr.value->boolean; input.isFlake = attr.value->boolean;
} else if (attr.name == sInputs) { } else if (attr.name == sInputs) {
input.overrides = parseFlakeInputs(state, attr.value, *attr.pos); input.overrides = parseFlakeInputs(state, attr.value, *attr.pos, baseDir);
} else if (attr.name == sFollows) { } else if (attr.name == sFollows) {
expectType(state, nString, *attr.value, *attr.pos); expectType(state, nString, *attr.value, *attr.pos);
input.follows = parseInputPath(attr.value->string.s); input.follows = parseInputPath(attr.value->string.s);
@ -153,7 +156,7 @@ static FlakeInput parseFlakeInput(EvalState & state,
if (!attrs.empty()) if (!attrs.empty())
throw Error("unexpected flake input attribute '%s', at %s", attrs.begin()->first, pos); throw Error("unexpected flake input attribute '%s', at %s", attrs.begin()->first, pos);
if (url) if (url)
input.ref = parseFlakeRef(*url, {}, true); input.ref = parseFlakeRef(*url, baseDir, true);
} }
if (!input.follows && !input.ref) if (!input.follows && !input.ref)
@ -163,7 +166,8 @@ static FlakeInput parseFlakeInput(EvalState & state,
} }
static std::map<FlakeId, FlakeInput> parseFlakeInputs( static std::map<FlakeId, FlakeInput> parseFlakeInputs(
EvalState & state, Value * value, const Pos & pos) EvalState & state, Value * value, const Pos & pos,
const std::optional<Path> & baseDir)
{ {
std::map<FlakeId, FlakeInput> inputs; std::map<FlakeId, FlakeInput> inputs;
@ -174,7 +178,8 @@ static std::map<FlakeId, FlakeInput> parseFlakeInputs(
parseFlakeInput(state, parseFlakeInput(state,
inputAttr.name, inputAttr.name,
inputAttr.value, inputAttr.value,
*inputAttr.pos)); *inputAttr.pos,
baseDir));
} }
return inputs; return inputs;
@ -190,7 +195,8 @@ static Flake getFlake(
state, originalRef, allowLookup, flakeCache); state, originalRef, allowLookup, flakeCache);
// Guard against symlink attacks. // Guard against symlink attacks.
auto flakeFile = canonPath(sourceInfo.actualPath + "/" + lockedRef.subdir + "/flake.nix"); auto flakeDir = canonPath(sourceInfo.actualPath + "/" + lockedRef.subdir);
auto flakeFile = canonPath(flakeDir + "/flake.nix");
if (!isInDir(flakeFile, sourceInfo.actualPath)) if (!isInDir(flakeFile, sourceInfo.actualPath))
throw Error("'flake.nix' file of flake '%s' escapes from '%s'", throw Error("'flake.nix' file of flake '%s' escapes from '%s'",
lockedRef, state.store->printStorePath(sourceInfo.storePath)); lockedRef, state.store->printStorePath(sourceInfo.storePath));
@ -218,7 +224,7 @@ static Flake getFlake(
auto sInputs = state.symbols.create("inputs"); auto sInputs = state.symbols.create("inputs");
if (auto inputs = vInfo.attrs->get(sInputs)) if (auto inputs = vInfo.attrs->get(sInputs))
flake.inputs = parseFlakeInputs(state, inputs->value, *inputs->pos); flake.inputs = parseFlakeInputs(state, inputs->value, *inputs->pos, flakeDir);
auto sOutputs = state.symbols.create("outputs"); auto sOutputs = state.symbols.create("outputs");
@ -296,7 +302,14 @@ LockedFlake lockFlake(
FlakeCache flakeCache; FlakeCache flakeCache;
auto flake = getFlake(state, topRef, lockFlags.useRegistries, flakeCache); auto useRegistries = lockFlags.useRegistries.value_or(settings.useRegistries);
auto flake = getFlake(state, topRef, useRegistries, flakeCache);
if (lockFlags.applyNixConfig) {
flake.config.apply();
// FIXME: send new config to the daemon.
}
try { try {
@ -317,25 +330,38 @@ LockedFlake lockFlake(
std::vector<FlakeRef> parents; std::vector<FlakeRef> parents;
struct LockParent {
/* The path to this parent. */
InputPath path;
/* Whether we are currently inside a top-level lockfile
(inputs absolute) or subordinate lockfile (inputs
relative). */
bool absolute;
};
std::function<void( std::function<void(
const FlakeInputs & flakeInputs, const FlakeInputs & flakeInputs,
std::shared_ptr<Node> node, std::shared_ptr<Node> node,
const InputPath & inputPathPrefix, const InputPath & inputPathPrefix,
std::shared_ptr<const Node> oldNode)> std::shared_ptr<const Node> oldNode,
const LockParent & parent,
const Path & parentPath)>
computeLocks; computeLocks;
computeLocks = [&]( computeLocks = [&](
const FlakeInputs & flakeInputs, const FlakeInputs & flakeInputs,
std::shared_ptr<Node> node, std::shared_ptr<Node> node,
const InputPath & inputPathPrefix, const InputPath & inputPathPrefix,
std::shared_ptr<const Node> oldNode) std::shared_ptr<const Node> oldNode,
const LockParent & parent,
const Path & parentPath)
{ {
debug("computing lock file node '%s'", printInputPath(inputPathPrefix)); debug("computing lock file node '%s'", printInputPath(inputPathPrefix));
/* Get the overrides (i.e. attributes of the form /* Get the overrides (i.e. attributes of the form
'inputs.nixops.inputs.nixpkgs.url = ...'). */ 'inputs.nixops.inputs.nixpkgs.url = ...'). */
// FIXME: check this for (auto & [id, input] : flakeInputs) {
for (auto & [id, input] : flake.inputs) {
for (auto & [idOverride, inputOverride] : input.overrides) { for (auto & [idOverride, inputOverride] : input.overrides) {
auto inputPath(inputPathPrefix); auto inputPath(inputPathPrefix);
inputPath.push_back(id); inputPath.push_back(id);
@ -359,22 +385,31 @@ LockedFlake lockFlake(
ancestors? */ ancestors? */
auto i = overrides.find(inputPath); auto i = overrides.find(inputPath);
bool hasOverride = i != overrides.end(); bool hasOverride = i != overrides.end();
if (hasOverride) overridesUsed.insert(inputPath); if (hasOverride) {
overridesUsed.insert(inputPath);
// Respect the “flakeness” of the input even if we
// override it
i->second.isFlake = input2.isFlake;
}
auto & input = hasOverride ? i->second : input2; auto & input = hasOverride ? i->second : input2;
/* Resolve 'follows' later (since it may refer to an input /* Resolve 'follows' later (since it may refer to an input
path we haven't processed yet. */ path we haven't processed yet. */
if (input.follows) { if (input.follows) {
InputPath target; InputPath target;
if (hasOverride || input.absolute)
/* 'follows' from an override is relative to the if (parent.absolute && !hasOverride) {
root of the graph. */
target = *input.follows; target = *input.follows;
else { } else {
/* Otherwise, it's relative to the current flake. */ if (hasOverride) {
target = inputPathPrefix; target = inputPathPrefix;
target.pop_back();
} else
target = parent.path;
for (auto & i : *input.follows) target.push_back(i); for (auto & i : *input.follows) target.push_back(i);
} }
debug("input '%s' follows '%s'", inputPathS, printInputPath(target)); debug("input '%s' follows '%s'", inputPathS, printInputPath(target));
node->inputs.insert_or_assign(id, target); node->inputs.insert_or_assign(id, target);
continue; continue;
@ -420,7 +455,7 @@ LockedFlake lockFlake(
if (hasChildUpdate) { if (hasChildUpdate) {
auto inputFlake = getFlake( auto inputFlake = getFlake(
state, oldLock->lockedRef, false, flakeCache); state, oldLock->lockedRef, false, flakeCache);
computeLocks(inputFlake.inputs, childNode, inputPath, oldLock); computeLocks(inputFlake.inputs, childNode, inputPath, oldLock, parent, parentPath);
} else { } else {
/* No need to fetch this flake, we can be /* No need to fetch this flake, we can be
lazy. However there may be new overrides on the lazy. However there may be new overrides on the
@ -437,12 +472,11 @@ LockedFlake lockFlake(
} else if (auto follows = std::get_if<1>(&i.second)) { } else if (auto follows = std::get_if<1>(&i.second)) {
fakeInputs.emplace(i.first, FlakeInput { fakeInputs.emplace(i.first, FlakeInput {
.follows = *follows, .follows = *follows,
.absolute = true
}); });
} }
} }
computeLocks(fakeInputs, childNode, inputPath, oldLock); computeLocks(fakeInputs, childNode, inputPath, oldLock, parent, parentPath);
} }
} else { } else {
@ -454,7 +488,15 @@ LockedFlake lockFlake(
throw Error("cannot update flake input '%s' in pure mode", inputPathS); throw Error("cannot update flake input '%s' in pure mode", inputPathS);
if (input.isFlake) { if (input.isFlake) {
auto inputFlake = getFlake(state, *input.ref, lockFlags.useRegistries, flakeCache); Path localPath = parentPath;
FlakeRef localRef = *input.ref;
// If this input is a path, recurse into it.
// This allows us to resolve path inputs relative to the current flake.
if (localRef.input.getType() == "path")
localPath = absPath(*input.ref->input.getSourcePath(), parentPath);
auto inputFlake = getFlake(state, localRef, useRegistries, flakeCache);
/* Note: in case of an --override-input, we use /* Note: in case of an --override-input, we use
the *original* ref (input2.ref) for the the *original* ref (input2.ref) for the
@ -475,6 +517,13 @@ LockedFlake lockFlake(
parents.push_back(*input.ref); parents.push_back(*input.ref);
Finally cleanup([&]() { parents.pop_back(); }); Finally cleanup([&]() { parents.pop_back(); });
// Follows paths from existing inputs in the top-level lockfile are absolute,
// whereas paths in subordinate lockfiles are relative to those lockfiles.
LockParent newParent {
.path = inputPath,
.absolute = oldLock ? true : false
};
/* Recursively process the inputs of this /* Recursively process the inputs of this
flake. Also, unless we already have this flake flake. Also, unless we already have this flake
in the top-level lock file, use this flake's in the top-level lock file, use this flake's
@ -484,12 +533,13 @@ LockedFlake lockFlake(
oldLock oldLock
? std::dynamic_pointer_cast<const Node>(oldLock) ? std::dynamic_pointer_cast<const Node>(oldLock)
: LockFile::read( : LockFile::read(
inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root); inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root,
newParent, localPath);
} }
else { else {
auto [sourceInfo, resolvedRef, lockedRef] = fetchOrSubstituteTree( auto [sourceInfo, resolvedRef, lockedRef] = fetchOrSubstituteTree(
state, *input.ref, lockFlags.useRegistries, flakeCache); state, *input.ref, useRegistries, flakeCache);
node->inputs.insert_or_assign(id, node->inputs.insert_or_assign(id,
std::make_shared<LockedNode>(lockedRef, *input.ref, false)); std::make_shared<LockedNode>(lockedRef, *input.ref, false));
} }
@ -502,9 +552,17 @@ LockedFlake lockFlake(
} }
}; };
LockParent parent {
.path = {},
.absolute = true
};
// Bring in the current ref for relative path resolution if we have it
auto parentPath = canonPath(flake.sourceInfo->actualPath + "/" + flake.lockedRef.subdir);
computeLocks( computeLocks(
flake.inputs, newLockFile.root, {}, flake.inputs, newLockFile.root, {},
lockFlags.recreateLockFile ? nullptr : oldLockFile.root); lockFlags.recreateLockFile ? nullptr : oldLockFile.root, parent, parentPath);
for (auto & i : lockFlags.inputOverrides) for (auto & i : lockFlags.inputOverrides)
if (!overridesUsed.count(i.first)) if (!overridesUsed.count(i.first))
@ -554,8 +612,8 @@ LockedFlake lockFlake(
topRef.input.markChangedFile( topRef.input.markChangedFile(
(topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock", (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock",
lockFlags.commitLockFile lockFlags.commitLockFile
? std::optional<std::string>(fmt("%s: %s\n\nFlake input changes:\n\n%s", ? std::optional<std::string>(fmt("%s: %s\n\nFlake lock file changes:\n\n%s",
relPath, lockFileExists ? "Update" : "Add", diff)) relPath, lockFileExists ? "Update" : "Add", filterANSIEscapes(diff, true)))
: std::nullopt); : std::nullopt);
/* Rewriting the lockfile changed the top-level /* Rewriting the lockfile changed the top-level
@ -563,7 +621,7 @@ LockedFlake lockFlake(
also just clear the 'rev' field... */ also just clear the 'rev' field... */
auto prevLockedRef = flake.lockedRef; auto prevLockedRef = flake.lockedRef;
FlakeCache dummyCache; FlakeCache dummyCache;
flake = getFlake(state, topRef, lockFlags.useRegistries, dummyCache); flake = getFlake(state, topRef, useRegistries, dummyCache);
if (lockFlags.commitLockFile && if (lockFlags.commitLockFile &&
flake.lockedRef.input.getRev() && flake.lockedRef.input.getRev() &&
@ -580,8 +638,10 @@ LockedFlake lockFlake(
} }
} else } else
throw Error("cannot write modified lock file of flake '%s' (use '--no-write-lock-file' to ignore)", topRef); throw Error("cannot write modified lock file of flake '%s' (use '--no-write-lock-file' to ignore)", topRef);
} else } else {
warn("not writing modified lock file of flake '%s':\n%s", topRef, chomp(diff)); warn("not writing modified lock file of flake '%s':\n%s", topRef, chomp(diff));
flake.forceDirty = true;
}
} }
return LockedFlake { .flake = std::move(flake), .lockFile = std::move(newLockFile) }; return LockedFlake { .flake = std::move(flake), .lockFile = std::move(newLockFile) };
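The parentPath/localPath plumbing above is what allows path inputs to be written relative to the declaring flake; a hedged sketch, assuming a ./subflake directory next to this flake.nix (the `sub` name and `packages` output are illustrative).

    # Relative path input, resolved against this flake's own directory:
    {
      inputs.sub.url = "path:./subflake";
      outputs = { self, sub }: {
        packages = sub.packages or { };
      };
    }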
@ -604,26 +664,32 @@ void callFlake(EvalState & state,
mkString(*vLocks, lockedFlake.lockFile.to_string()); mkString(*vLocks, lockedFlake.lockFile.to_string());
emitTreeAttrs(state, *lockedFlake.flake.sourceInfo, lockedFlake.flake.lockedRef.input, *vRootSrc); emitTreeAttrs(
state,
*lockedFlake.flake.sourceInfo,
lockedFlake.flake.lockedRef.input,
*vRootSrc,
false,
lockedFlake.flake.forceDirty);
mkString(*vRootSubdir, lockedFlake.flake.lockedRef.subdir); mkString(*vRootSubdir, lockedFlake.flake.lockedRef.subdir);
static RootValue vCallFlake = nullptr; if (!state.vCallFlake) {
state.vCallFlake = allocRootValue(state.allocValue());
if (!vCallFlake) {
vCallFlake = allocRootValue(state.allocValue());
state.eval(state.parseExprFromString( state.eval(state.parseExprFromString(
#include "call-flake.nix.gen.hh" #include "call-flake.nix.gen.hh"
, "/"), **vCallFlake); , "/"), **state.vCallFlake);
} }
state.callFunction(**vCallFlake, *vLocks, *vTmp1, noPos); state.callFunction(**state.vCallFlake, *vLocks, *vTmp1, noPos);
state.callFunction(*vTmp1, *vRootSrc, *vTmp2, noPos); state.callFunction(*vTmp1, *vRootSrc, *vTmp2, noPos);
state.callFunction(*vTmp2, *vRootSubdir, vRes, noPos); state.callFunction(*vTmp2, *vRootSubdir, vRes, noPos);
} }
static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v)
{ {
state.requireExperimentalFeatureOnEvaluation("flakes", "builtins.getFlake", pos);
auto flakeRefS = state.forceStringNoCtx(*args[0], pos); auto flakeRefS = state.forceStringNoCtx(*args[0], pos);
auto flakeRef = parseFlakeRef(flakeRefS, {}, true); auto flakeRef = parseFlakeRef(flakeRefS, {}, true);
if (evalSettings.pureEval && !flakeRef.input.isImmutable()) if (evalSettings.pureEval && !flakeRef.input.isImmutable())
@ -633,13 +699,13 @@ static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Va
lockFlake(state, flakeRef, lockFlake(state, flakeRef,
LockFlags { LockFlags {
.updateLockFile = false, .updateLockFile = false,
.useRegistries = !evalSettings.pureEval, .useRegistries = !evalSettings.pureEval && settings.useRegistries,
.allowMutable = !evalSettings.pureEval, .allowMutable = !evalSettings.pureEval,
}), }),
v); v);
} }
static RegisterPrimOp r2("__getFlake", 1, prim_getFlake, "flakes"); static RegisterPrimOp r2("__getFlake", 1, prim_getFlake);
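With the requireExperimentalFeatureOnEvaluation call above, builtins.getFlake now fails early unless the 'flakes' experimental feature is enabled; a usage sketch (under pure evaluation the ref must additionally be pinned to a revision, since mutable refs are still rejected).

    # Requires the 'flakes' experimental feature:
    (builtins.getFlake "github:NixOS/nixpkgs").lib.version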
} }
@ -649,8 +715,9 @@ Fingerprint LockedFlake::getFingerprint() const
// and we haven't changed it, then it's sufficient to use // and we haven't changed it, then it's sufficient to use
// flake.sourceInfo.storePath for the fingerprint. // flake.sourceInfo.storePath for the fingerprint.
return hashString(htSHA256, return hashString(htSHA256,
fmt("%s;%d;%d;%s", fmt("%s;%s;%d;%d;%s",
flake.sourceInfo->storePath.to_string(), flake.sourceInfo->storePath.to_string(),
flake.lockedRef.subdir,
flake.lockedRef.input.getRevCount().value_or(0), flake.lockedRef.input.getRevCount().value_or(0),
flake.lockedRef.input.getLastModified().value_or(0), flake.lockedRef.input.getLastModified().value_or(0),
lockFile)); lockFile));

View file

@ -43,7 +43,6 @@ struct FlakeInput
std::optional<FlakeRef> ref; std::optional<FlakeRef> ref;
bool isFlake = true; // true = process flake to get outputs, false = (fetched) static source path bool isFlake = true; // true = process flake to get outputs, false = (fetched) static source path
std::optional<InputPath> follows; std::optional<InputPath> follows;
bool absolute = false; // whether 'follows' is relative to the flake root
FlakeInputs overrides; FlakeInputs overrides;
}; };
@ -59,9 +58,10 @@ struct ConfigFile
/* The contents of a flake.nix file. */ /* The contents of a flake.nix file. */
struct Flake struct Flake
{ {
FlakeRef originalRef; // the original flake specification (by the user) FlakeRef originalRef; // the original flake specification (by the user)
FlakeRef resolvedRef; // registry references and caching resolved to the specific underlying flake FlakeRef resolvedRef; // registry references and caching resolved to the specific underlying flake
FlakeRef lockedRef; // the specific local store result of invoking the fetcher FlakeRef lockedRef; // the specific local store result of invoking the fetcher
bool forceDirty = false; // pretend that 'lockedRef' is dirty
std::optional<std::string> description; std::optional<std::string> description;
std::shared_ptr<const fetchers::Tree> sourceInfo; std::shared_ptr<const fetchers::Tree> sourceInfo;
FlakeInputs inputs; FlakeInputs inputs;
@ -102,7 +102,11 @@ struct LockFlags
/* Whether to use the registries to lookup indirect flake /* Whether to use the registries to lookup indirect flake
references like 'nixpkgs'. */ references like 'nixpkgs'. */
bool useRegistries = true; std::optional<bool> useRegistries = std::nullopt;
/* Whether to apply flake's nixConfig attribute to the configuration */
bool applyNixConfig = false;
/* Whether mutable flake references (i.e. those without a Git /* Whether mutable flake references (i.e. those without a Git
revision or similar) without a corresponding lock are revision or similar) without a corresponding lock are
@ -137,6 +141,8 @@ void emitTreeAttrs(
EvalState & state, EvalState & state,
const fetchers::Tree & tree, const fetchers::Tree & tree,
const fetchers::Input & input, const fetchers::Input & input,
Value & v, bool emptyRevFallback = false); Value & v,
bool emptyRevFallback = false,
bool forceDirty = false);
} }

View file

@ -172,8 +172,12 @@ std::pair<FlakeRef, std::string> parseFlakeRefWithFragment(
auto parsedURL = parseURL(url); auto parsedURL = parseURL(url);
std::string fragment; std::string fragment;
std::swap(fragment, parsedURL.fragment); std::swap(fragment, parsedURL.fragment);
auto input = Input::fromURL(parsedURL);
input.parent = baseDir;
return std::make_pair( return std::make_pair(
FlakeRef(Input::fromURL(parsedURL), get(parsedURL.query, "dir").value_or("")), FlakeRef(std::move(input), get(parsedURL.query, "dir").value_or("")),
fragment); fragment);
} }
} }

View file

@ -2,6 +2,8 @@
#include "store-api.hh" #include "store-api.hh"
#include "url-parts.hh" #include "url-parts.hh"
#include <iomanip>
#include <nlohmann/json.hpp> #include <nlohmann/json.hpp>
namespace nix::flake { namespace nix::flake {
@ -268,10 +270,20 @@ std::map<InputPath, Node::Edge> LockFile::getAllInputs() const
return res; return res;
} }
static std::string describe(const FlakeRef & flakeRef)
{
auto s = fmt("'%s'", flakeRef.to_string());
if (auto lastModified = flakeRef.input.getLastModified())
s += fmt(" (%s)", std::put_time(std::gmtime(&*lastModified), "%Y-%m-%d"));
return s;
}
std::ostream & operator <<(std::ostream & stream, const Node::Edge & edge) std::ostream & operator <<(std::ostream & stream, const Node::Edge & edge)
{ {
if (auto node = std::get_if<0>(&edge)) if (auto node = std::get_if<0>(&edge))
stream << "'" << (*node)->lockedRef << "'"; stream << describe((*node)->lockedRef);
else if (auto follows = std::get_if<1>(&edge)) else if (auto follows = std::get_if<1>(&edge))
stream << fmt("follows '%s'", printInputPath(*follows)); stream << fmt("follows '%s'", printInputPath(*follows));
return stream; return stream;
@ -299,14 +311,15 @@ std::string LockFile::diff(const LockFile & oldLocks, const LockFile & newLocks)
while (i != oldFlat.end() || j != newFlat.end()) { while (i != oldFlat.end() || j != newFlat.end()) {
if (j != newFlat.end() && (i == oldFlat.end() || i->first > j->first)) { if (j != newFlat.end() && (i == oldFlat.end() || i->first > j->first)) {
res += fmt("* Added '%s': %s\n", printInputPath(j->first), j->second); res += fmt("" ANSI_GREEN "Added input '%s':" ANSI_NORMAL "\n %s\n",
printInputPath(j->first), j->second);
++j; ++j;
} else if (i != oldFlat.end() && (j == newFlat.end() || i->first < j->first)) { } else if (i != oldFlat.end() && (j == newFlat.end() || i->first < j->first)) {
res += fmt("* Removed '%s'\n", printInputPath(i->first)); res += fmt("" ANSI_RED "Removed input '%s'" ANSI_NORMAL "\n", printInputPath(i->first));
++i; ++i;
} else { } else {
if (!equals(i->second, j->second)) { if (!equals(i->second, j->second)) {
res += fmt("* Updated '%s': %s -> %s\n", res += fmt("" ANSI_BOLD "Updated input '%s':" ANSI_NORMAL "\n %s\n %s\n",
printInputPath(i->first), printInputPath(i->first),
i->second, i->second,
j->second); j->second);

View file

@ -9,6 +9,9 @@
%s DEFAULT %s DEFAULT
%x STRING %x STRING
%x IND_STRING %x IND_STRING
%x INPATH
%x INPATH_SLASH
%x PATH_START
%{ %{
@ -25,6 +28,8 @@ using namespace nix;
namespace nix { namespace nix {
// backup to recover from yyless(0)
YYLTYPE prev_yylloc;
static void initLoc(YYLTYPE * loc) static void initLoc(YYLTYPE * loc)
{ {
@ -35,14 +40,18 @@ static void initLoc(YYLTYPE * loc)
static void adjustLoc(YYLTYPE * loc, const char * s, size_t len) static void adjustLoc(YYLTYPE * loc, const char * s, size_t len)
{ {
prev_yylloc = *loc;
loc->first_line = loc->last_line; loc->first_line = loc->last_line;
loc->first_column = loc->last_column; loc->first_column = loc->last_column;
while (len--) { for (size_t i = 0; i < len; i++) {
switch (*s++) { switch (*s++) {
case '\r': case '\r':
if (*s == '\n') /* cr/lf */ if (*s == '\n') { /* cr/lf */
i++;
s++; s++;
}
/* fall through */ /* fall through */
case '\n': case '\n':
++loc->last_line; ++loc->last_line;
@ -95,9 +104,12 @@ ANY .|\n
ID [a-zA-Z\_][a-zA-Z0-9\_\'\-]* ID [a-zA-Z\_][a-zA-Z0-9\_\'\-]*
INT [0-9]+ INT [0-9]+
FLOAT (([1-9][0-9]*\.[0-9]*)|(0?\.[0-9]+))([Ee][+-]?[0-9]+)? FLOAT (([1-9][0-9]*\.[0-9]*)|(0?\.[0-9]+))([Ee][+-]?[0-9]+)?
PATH [a-zA-Z0-9\.\_\-\+]*(\/[a-zA-Z0-9\.\_\-\+]+)+\/? PATH_CHAR [a-zA-Z0-9\.\_\-\+]
HPATH \~(\/[a-zA-Z0-9\.\_\-\+]+)+\/? PATH {PATH_CHAR}*(\/{PATH_CHAR}+)+\/?
SPATH \<[a-zA-Z0-9\.\_\-\+]+(\/[a-zA-Z0-9\.\_\-\+]+)*\> PATH_SEG {PATH_CHAR}*\/
HPATH \~(\/{PATH_CHAR}+)+\/?
HPATH_START \~\/
SPATH \<{PATH_CHAR}+(\/{PATH_CHAR}+)*\>
URI [a-zA-Z][a-zA-Z0-9\+\-\.]*\:[a-zA-Z0-9\%\/\?\:\@\&\=\+\$\,\-\_\.\!\~\*\']+ URI [a-zA-Z][a-zA-Z0-9\+\-\.]*\:[a-zA-Z0-9\%\/\?\:\@\&\=\+\$\,\-\_\.\!\~\*\']+
@ -198,17 +210,75 @@ or { return OR_KW; }
return IND_STR; return IND_STR;
} }
{PATH_SEG}\$\{ |
{HPATH_START}\$\{ {
PUSH_STATE(PATH_START);
yyless(0);
*yylloc = prev_yylloc;
}
<PATH_START>{PATH_SEG} {
POP_STATE();
PUSH_STATE(INPATH_SLASH);
yylval->path = strdup(yytext);
return PATH;
}
<PATH_START>{HPATH_START} {
POP_STATE();
PUSH_STATE(INPATH_SLASH);
yylval->path = strdup(yytext);
return HPATH;
}
{PATH} {
if (yytext[yyleng-1] == '/')
PUSH_STATE(INPATH_SLASH);
else
PUSH_STATE(INPATH);
yylval->path = strdup(yytext);
return PATH;
}
{HPATH} {
if (yytext[yyleng-1] == '/')
PUSH_STATE(INPATH_SLASH);
else
PUSH_STATE(INPATH);
yylval->path = strdup(yytext);
return HPATH;
}
<INPATH,INPATH_SLASH>\$\{ {
POP_STATE();
PUSH_STATE(INPATH);
PUSH_STATE(DEFAULT);
return DOLLAR_CURLY;
}
<INPATH,INPATH_SLASH>{PATH}|{PATH_SEG}|{PATH_CHAR}+ {
POP_STATE();
if (yytext[yyleng-1] == '/')
PUSH_STATE(INPATH_SLASH);
else
PUSH_STATE(INPATH);
yylval->e = new ExprString(data->symbols.create(string(yytext)));
return STR;
}
<INPATH>{ANY} |
<INPATH><<EOF>> {
/* if we encounter a non-path character we inform the parser that the path has
ended with a PATH_END token and re-parse this character in the default
context (it may be ')', ';', or something of that sort) */
POP_STATE();
yyless(0);
*yylloc = prev_yylloc;
return PATH_END;
}
<INPATH_SLASH>{ANY} |
<INPATH_SLASH><<EOF>> {
throw ParseError("path has a trailing slash");
}
{PATH} { if (yytext[yyleng-1] == '/')
throw ParseError("path '%s' has a trailing slash", yytext);
yylval->path = strdup(yytext);
return PATH;
}
{HPATH} { if (yytext[yyleng-1] == '/')
throw ParseError("path '%s' has a trailing slash", yytext);
yylval->path = strdup(yytext);
return HPATH;
}
{SPATH} { yylval->path = strdup(yytext); return SPATH; } {SPATH} { yylval->path = strdup(yytext); return SPATH; }
{URI} { yylval->uri = strdup(yytext); return URI; } {URI} { yylval->uri = strdup(yytext); return URI; }

View file

@ -15,8 +15,8 @@ libexpr_CXXFLAGS += -I src/libutil -I src/libstore -I src/libfetchers -I src/lib
libexpr_LIBS = libutil libstore libfetchers libexpr_LIBS = libutil libstore libfetchers
libexpr_LDFLAGS = -lboost_context libexpr_LDFLAGS += -lboost_context -pthread
ifneq ($(OS), FreeBSD) ifdef HOST_LINUX
libexpr_LDFLAGS += -ldl libexpr_LDFLAGS += -ldl
endif endif
@ -35,7 +35,7 @@ $(d)/lexer-tab.cc $(d)/lexer-tab.hh: $(d)/lexer.l
clean-files += $(d)/parser-tab.cc $(d)/parser-tab.hh $(d)/lexer-tab.cc $(d)/lexer-tab.hh clean-files += $(d)/parser-tab.cc $(d)/parser-tab.hh $(d)/lexer-tab.cc $(d)/lexer-tab.hh
$(eval $(call install-file-in, $(d)/nix-expr.pc, $(prefix)/lib/pkgconfig, 0644)) $(eval $(call install-file-in, $(d)/nix-expr.pc, $(libdir)/pkgconfig, 0644))
$(foreach i, $(wildcard src/libexpr/flake/*.hh), \ $(foreach i, $(wildcard src/libexpr/flake/*.hh), \
$(eval $(call install-file-in, $(i), $(includedir)/nix/flake, 0644))) $(eval $(call install-file-in, $(i), $(includedir)/nix/flake, 0644)))

View file

@ -180,6 +180,7 @@ struct ExprOpHasAttr : Expr
struct ExprAttrs : Expr struct ExprAttrs : Expr
{ {
bool recursive; bool recursive;
Pos pos;
struct AttrDef { struct AttrDef {
bool inherited; bool inherited;
Expr * e; Expr * e;
@ -199,7 +200,8 @@ struct ExprAttrs : Expr
}; };
typedef std::vector<DynamicAttrDef> DynamicAttrDefs; typedef std::vector<DynamicAttrDef> DynamicAttrDefs;
DynamicAttrDefs dynamicAttrs; DynamicAttrDefs dynamicAttrs;
ExprAttrs() : recursive(false) { }; ExprAttrs(const Pos &pos) : recursive(false), pos(pos) { };
ExprAttrs() : recursive(false), pos(noPos) { };
COMMON_METHODS COMMON_METHODS
}; };

View file

@ -290,13 +290,13 @@ void yyerror(YYLTYPE * loc, yyscan_t scanner, ParseData * data, const char * err
%type <formal> formal %type <formal> formal
%type <attrNames> attrs attrpath %type <attrNames> attrs attrpath
%type <string_parts> string_parts_interpolated ind_string_parts %type <string_parts> string_parts_interpolated ind_string_parts
%type <e> string_parts string_attr %type <e> path_start string_parts string_attr
%type <id> attr %type <id> attr
%token <id> ID ATTRPATH %token <id> ID ATTRPATH
%token <e> STR IND_STR %token <e> STR IND_STR
%token <n> INT %token <n> INT
%token <nf> FLOAT %token <nf> FLOAT
%token <path> PATH HPATH SPATH %token <path> PATH HPATH SPATH PATH_END
%token <uri> URI %token <uri> URI
%token IF THEN ELSE ASSERT WITH LET IN REC INHERIT EQ NEQ AND OR IMPL OR_KW %token IF THEN ELSE ASSERT WITH LET IN REC INHERIT EQ NEQ AND OR IMPL OR_KW
%token DOLLAR_CURLY /* == ${ */ %token DOLLAR_CURLY /* == ${ */
@ -405,8 +405,11 @@ expr_simple
| IND_STRING_OPEN ind_string_parts IND_STRING_CLOSE { | IND_STRING_OPEN ind_string_parts IND_STRING_CLOSE {
$$ = stripIndentation(CUR_POS, data->symbols, *$2); $$ = stripIndentation(CUR_POS, data->symbols, *$2);
} }
| PATH { $$ = new ExprPath(absPath($1, data->basePath)); } | path_start PATH_END { $$ = $1; }
| HPATH { $$ = new ExprPath(getHome() + string{$1 + 1}); } | path_start string_parts_interpolated PATH_END {
$2->insert($2->begin(), $1);
$$ = new ExprConcatStrings(CUR_POS, false, $2);
}
| SPATH { | SPATH {
string path($1 + 1, strlen($1) - 2); string path($1 + 1, strlen($1) - 2);
$$ = new ExprApp(CUR_POS, $$ = new ExprApp(CUR_POS,
@ -452,6 +455,20 @@ string_parts_interpolated
} }
; ;
path_start
: PATH {
Path path(absPath($1, data->basePath));
/* add back in the trailing '/' to the first segment */
if ($1[strlen($1)-1] == '/' && strlen($1) > 1)
path += "/";
$$ = new ExprPath(path);
}
| HPATH {
Path path(getHome() + string($1 + 1));
$$ = new ExprPath(path);
}
;
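Taken together, the new lexer states and the path_start/PATH_END grammar implement interpolation inside path literals; a small sketch, where the `dir` binding is hypothetical.

    # A path literal with an interpolated segment, accepted by the rules above;
    # evaluates to a path value:
    let dir = "lib"; in ./src/${dir}/default.nix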
ind_string_parts ind_string_parts
: ind_string_parts IND_STR { $$ = $1; $1->push_back($2); } : ind_string_parts IND_STR { $$ = $1; $1->push_back($2); }
| ind_string_parts DOLLAR_CURLY expr '}' { $$ = $1; $1->push_back($3); } | ind_string_parts DOLLAR_CURLY expr '}' { $$ = $1; $1->push_back($3); }
@ -478,7 +495,7 @@ binds
$$->attrs[i.symbol] = ExprAttrs::AttrDef(new ExprSelect(CUR_POS, $4, i.symbol), makeCurPos(@6, data)); $$->attrs[i.symbol] = ExprAttrs::AttrDef(new ExprSelect(CUR_POS, $4, i.symbol), makeCurPos(@6, data));
} }
} }
| { $$ = new ExprAttrs; } | { $$ = new ExprAttrs(makeCurPos(@0, data)); }
; ;
attrs attrs

View file

@ -21,6 +21,8 @@
#include <regex> #include <regex>
#include <dlfcn.h> #include <dlfcn.h>
#include <cmath>
namespace nix { namespace nix {
@ -50,7 +52,8 @@ void EvalState::realiseContext(const PathSet & context)
if (drvs.empty()) return; if (drvs.empty()) return;
if (!evalSettings.enableImportFromDerivation) if (!evalSettings.enableImportFromDerivation)
throw EvalError("attempted to realize '%1%' during evaluation but 'allow-import-from-derivation' is false", throw Error(
"cannot build '%1%' during evaluation because the option 'allow-import-from-derivation' is disabled",
store->printStorePath(drvs.begin()->drvPath)); store->printStorePath(drvs.begin()->drvPath));
/* For performance, prefetch all substitute info. */ /* For performance, prefetch all substitute info. */
@ -122,7 +125,7 @@ static void import(EvalState & state, const Pos & pos, Value & vPath, Value * vS
}); });
} catch (Error & e) { } catch (Error & e) {
e.addTrace(pos, "while importing '%s'", path); e.addTrace(pos, "while importing '%s'", path);
throw e; throw;
} }
Path realPath = state.checkSourcePath(state.toRealPath(path, context)); Path realPath = state.checkSourcePath(state.toRealPath(path, context));
@ -158,16 +161,15 @@ static void import(EvalState & state, const Pos & pos, Value & vPath, Value * vS
} }
w.attrs->sort(); w.attrs->sort();
static RootValue fun; if (!state.vImportedDrvToDerivation) {
if (!fun) { state.vImportedDrvToDerivation = allocRootValue(state.allocValue());
fun = allocRootValue(state.allocValue());
state.eval(state.parseExprFromString( state.eval(state.parseExprFromString(
#include "imported-drv-to-derivation.nix.gen.hh" #include "imported-drv-to-derivation.nix.gen.hh"
, "/"), **fun); , "/"), **state.vImportedDrvToDerivation);
} }
state.forceFunction(**fun, pos); state.forceFunction(**state.vImportedDrvToDerivation, pos);
mkApp(v, **fun, w); mkApp(v, **state.vImportedDrvToDerivation, w);
state.forceAttrs(v, pos); state.forceAttrs(v, pos);
} }
@ -547,18 +549,56 @@ typedef list<Value *> ValueList;
#endif #endif
static Bindings::iterator getAttr(
EvalState & state,
string funcName,
string attrName,
Bindings * attrSet,
const Pos & pos)
{
Bindings::iterator value = attrSet->find(state.symbols.create(attrName));
if (value == attrSet->end()) {
hintformat errorMsg = hintfmt(
"attribute '%s' missing for call to '%s'",
attrName,
funcName
);
Pos aPos = *attrSet->pos;
if (aPos == noPos) {
throw TypeError({
.msg = errorMsg,
.errPos = pos,
});
} else {
auto e = TypeError({
.msg = errorMsg,
.errPos = aPos,
});
// Adding another trace for the function name to make it clear
// which call received wrong arguments.
e.addTrace(pos, hintfmt("while invoking '%s'", funcName));
throw e;
}
}
return value;
}
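The getAttr helper above centralises the 'missing attribute' errors used by the primops converted below; for example, the following call now reports both the attribute and the primop name.

    # Fails with "attribute 'operator' missing for call to 'genericClosure'",
    # plus a "while invoking 'genericClosure'" trace when position info is known:
    builtins.genericClosure { startSet = [ { key = 1; } ]; }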
static void prim_genericClosure(EvalState & state, const Pos & pos, Value * * args, Value & v) static void prim_genericClosure(EvalState & state, const Pos & pos, Value * * args, Value & v)
{ {
state.forceAttrs(*args[0], pos); state.forceAttrs(*args[0], pos);
/* Get the start set. */ /* Get the start set. */
Bindings::iterator startSet = Bindings::iterator startSet = getAttr(
args[0]->attrs->find(state.symbols.create("startSet")); state,
if (startSet == args[0]->attrs->end()) "genericClosure",
throw EvalError({ "startSet",
.msg = hintfmt("attribute 'startSet' required"), args[0]->attrs,
.errPos = pos pos
}); );
state.forceList(*startSet->value, pos); state.forceList(*startSet->value, pos);
ValueList workSet; ValueList workSet;
@ -566,13 +606,14 @@ static void prim_genericClosure(EvalState & state, const Pos & pos, Value * * ar
workSet.push_back(startSet->value->listElems()[n]); workSet.push_back(startSet->value->listElems()[n]);
/* Get the operator. */ /* Get the operator. */
Bindings::iterator op = Bindings::iterator op = getAttr(
args[0]->attrs->find(state.symbols.create("operator")); state,
if (op == args[0]->attrs->end()) "genericClosure",
throw EvalError({ "operator",
.msg = hintfmt("attribute 'operator' required"), args[0]->attrs,
.errPos = pos pos
}); );
state.forceValue(*op->value, pos); state.forceValue(*op->value, pos);
/* Construct the closure by applying the operator to element of /* Construct the closure by applying the operator to element of
@ -675,6 +716,44 @@ static RegisterPrimOp primop_addErrorContext(RegisterPrimOp::Info {
.fun = prim_addErrorContext, .fun = prim_addErrorContext,
}); });
static void prim_ceil(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
auto value = state.forceFloat(*args[0], args[0]->determinePos(pos));
mkInt(v, ceil(value));
}
static RegisterPrimOp primop_ceil({
.name = "__ceil",
.args = {"double"},
.doc = R"(
Converts an IEEE-754 double-precision floating-point number (*double*) to
the next higher integer.
If the datatype is neither an integer nor a "float", an evaluation error will be
thrown.
)",
.fun = prim_ceil,
});
static void prim_floor(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
auto value = state.forceFloat(*args[0], args[0]->determinePos(pos));
mkInt(v, floor(value));
}
static RegisterPrimOp primop_floor({
.name = "__floor",
.args = {"double"},
.doc = R"(
Converts an IEEE-754 double-precision floating-point number (*double*) to
the next lower integer.
If the datatype is neither an integer nor a "float", an evaluation error will be
thrown.
)",
.fun = prim_floor,
});
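Usage sketch for the two primops registered above; both force their argument as a float (integers are accepted via coercion) and return an integer.

    [ (builtins.ceil 2.1) (builtins.floor 2.9) ]   # => [ 3 2 ]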
/* Try evaluating the argument. Success => {success=true; value=something;}, /* Try evaluating the argument. Success => {success=true; value=something;},
* else => {success=false; value=false;} */ * else => {success=false; value=false;} */
static void prim_tryEval(EvalState & state, const Pos & pos, Value * * args, Value & v) static void prim_tryEval(EvalState & state, const Pos & pos, Value * * args, Value & v)
@ -816,12 +895,14 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
state.forceAttrs(*args[0], pos); state.forceAttrs(*args[0], pos);
/* Figure out the name first (for stack backtraces). */ /* Figure out the name first (for stack backtraces). */
Bindings::iterator attr = args[0]->attrs->find(state.sName); Bindings::iterator attr = getAttr(
if (attr == args[0]->attrs->end()) state,
throw EvalError({ "derivationStrict",
.msg = hintfmt("required attribute 'name' missing"), state.sName,
.errPos = pos args[0]->attrs,
}); pos
);
string drvName; string drvName;
Pos & posDrvName(*attr->pos); Pos & posDrvName(*attr->pos);
try { try {
@ -953,7 +1034,7 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
} }
} else { } else {
auto s = state.coerceToString(posDrvName, *i->value, context, true); auto s = state.coerceToString(*i->pos, *i->value, context, true);
drv.env.emplace(key, s); drv.env.emplace(key, s);
if (i->name == state.sBuilder) drv.builder = s; if (i->name == state.sBuilder) drv.builder = s;
else if (i->name == state.sSystem) drv.platform = s; else if (i->name == state.sSystem) drv.platform = s;
@ -1216,7 +1297,10 @@ static RegisterPrimOp primop_toPath({
static void prim_storePath(EvalState & state, const Pos & pos, Value * * args, Value & v) static void prim_storePath(EvalState & state, const Pos & pos, Value * * args, Value & v)
{ {
if (evalSettings.pureEval) if (evalSettings.pureEval)
throw EvalError("builtins.storePath' is not allowed in pure evaluation mode"); throw EvalError({
.msg = hintfmt("'%s' is not allowed in pure evaluation mode", "builtins.storePath"),
.errPos = pos
});
PathSet context; PathSet context;
Path path = state.checkSourcePath(state.coerceToPath(pos, *args[0], context)); Path path = state.checkSourcePath(state.coerceToPath(pos, *args[0], context));
@ -1375,12 +1459,13 @@ static void prim_findFile(EvalState & state, const Pos & pos, Value * * args, Va
if (i != v2.attrs->end()) if (i != v2.attrs->end())
prefix = state.forceStringNoCtx(*i->value, pos); prefix = state.forceStringNoCtx(*i->value, pos);
i = v2.attrs->find(state.symbols.create("path")); i = getAttr(
if (i == v2.attrs->end()) state,
throw EvalError({ "findFile",
.msg = hintfmt("attribute 'path' missing"), "path",
.errPos = pos v2.attrs,
}); pos
);
PathSet context; PathSet context;
string path = state.coerceToString(pos, *i->value, context, false, false); string path = state.coerceToString(pos, *i->value, context, false, false);
@ -1414,15 +1499,20 @@ static void prim_hashFile(EvalState & state, const Pos & pos, Value * * args, Va
string type = state.forceStringNoCtx(*args[0], pos); string type = state.forceStringNoCtx(*args[0], pos);
std::optional<HashType> ht = parseHashType(type); std::optional<HashType> ht = parseHashType(type);
if (!ht) if (!ht)
throw Error({ throw Error({
.msg = hintfmt("unknown hash type '%1%'", type), .msg = hintfmt("unknown hash type '%1%'", type),
.errPos = pos .errPos = pos
}); });
PathSet context; // discarded PathSet context;
Path p = state.coerceToPath(pos, *args[1], context); Path path = state.coerceToPath(pos, *args[1], context);
try {
state.realiseContext(context);
} catch (InvalidPathError & e) {
throw EvalError("cannot read '%s' since path '%s' is not valid, at %s", path, e.path, pos);
}
mkString(v, hashFile(*ht, state.checkSourcePath(p)).to_string(Base16, false), context); mkString(v, hashFile(*ht, state.checkSourcePath(state.toRealPath(path, context))).to_string(Base16, false));
} }
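The reworked prim_hashFile above no longer discards the string context of its path argument; the context is realised first, so the hashed file may itself come from a build. A plain usage sketch:

    # Hash an ordinary source file with the new code path:
    builtins.hashFile "sha256" ./flake.nix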
static RegisterPrimOp primop_hashFile({ static RegisterPrimOp primop_hashFile({
@ -1633,7 +1723,7 @@ static void prim_fromJSON(EvalState & state, const Pos & pos, Value * * args, Va
parseJSON(state, s, v); parseJSON(state, s, v);
} catch (JSONParseError &e) { } catch (JSONParseError &e) {
e.addTrace(pos, "while decoding a JSON string"); e.addTrace(pos, "while decoding a JSON string");
throw e; throw;
} }
} }
@ -1932,26 +2022,26 @@ static RegisterPrimOp primop_path({
An enrichment of the built-in path type, based on the attributes An enrichment of the built-in path type, based on the attributes
present in *args*. All are optional except `path`: present in *args*. All are optional except `path`:
- path - path\
The underlying path. The underlying path.
- name - name\
The name of the path when added to the store. This can be used to The name of the path when added to the store. This can be used to
reference paths that have nix-illegal characters in their names, reference paths that have nix-illegal characters in their names,
like `@`. like `@`.
- filter - filter\
A function of the type expected by `builtins.filterSource`, A function of the type expected by `builtins.filterSource`,
with the same semantics. with the same semantics.
- recursive - recursive\
When `false`, when `path` is added to the store it is with a When `false`, when `path` is added to the store it is with a
flat hash, rather than a hash of the NAR serialization of the flat hash, rather than a hash of the NAR serialization of the
file. Thus, `path` must refer to a regular file, not a file. Thus, `path` must refer to a regular file, not a
directory. This allows similar behavior to `fetchurl`. Defaults directory. This allows similar behavior to `fetchurl`. Defaults
to `true`. to `true`.
- sha256 - sha256\
When provided, this is the expected hash of the file at the When provided, this is the expected hash of the file at the
path. Evaluation will fail if the hash is incorrect, and path. Evaluation will fail if the hash is incorrect, and
providing a hash allows `builtins.path` to be used even when the providing a hash allows `builtins.path` to be used even when the
@ -2028,14 +2118,15 @@ void prim_getAttr(EvalState & state, const Pos & pos, Value * * args, Value & v)
string attr = state.forceStringNoCtx(*args[0], pos); string attr = state.forceStringNoCtx(*args[0], pos);
state.forceAttrs(*args[1], pos); state.forceAttrs(*args[1], pos);
// !!! Should we create a symbol here or just do a lookup? // !!! Should we create a symbol here or just do a lookup?
Bindings::iterator i = args[1]->attrs->find(state.symbols.create(attr)); Bindings::iterator i = getAttr(
if (i == args[1]->attrs->end()) state,
throw EvalError({ "getAttr",
.msg = hintfmt("attribute '%1%' missing", attr), attr,
.errPos = pos args[1]->attrs,
}); pos
);
// !!! add to stack trace? // !!! add to stack trace?
if (state.countCalls && i->pos) state.attrSelects[*i->pos]++; if (state.countCalls && *i->pos != noPos) state.attrSelects[*i->pos]++;
state.forceValue(*i->value, pos); state.forceValue(*i->value, pos);
v = *i->value; v = *i->value;
} }
@ -2160,22 +2251,25 @@ static void prim_listToAttrs(EvalState & state, const Pos & pos, Value * * args,
Value & v2(*args[0]->listElems()[i]); Value & v2(*args[0]->listElems()[i]);
state.forceAttrs(v2, pos); state.forceAttrs(v2, pos);
Bindings::iterator j = v2.attrs->find(state.sName); Bindings::iterator j = getAttr(
if (j == v2.attrs->end()) state,
throw TypeError({ "listToAttrs",
.msg = hintfmt("'name' attribute missing in a call to 'listToAttrs'"), state.sName,
.errPos = pos v2.attrs,
}); pos
string name = state.forceStringNoCtx(*j->value, pos); );
string name = state.forceStringNoCtx(*j->value, *j->pos);
Symbol sym = state.symbols.create(name); Symbol sym = state.symbols.create(name);
if (seen.insert(sym).second) { if (seen.insert(sym).second) {
Bindings::iterator j2 = v2.attrs->find(state.symbols.create(state.sValue)); Bindings::iterator j2 = getAttr(
if (j2 == v2.attrs->end()) state,
throw TypeError({ "listToAttrs",
.msg = hintfmt("'value' attribute missing in a call to 'listToAttrs'"), state.sValue,
.errPos = pos v2.attrs,
}); pos
);
v.attrs->push_back(Attr(sym, j2->value, j2->pos)); v.attrs->push_back(Attr(sym, j2->value, j2->pos));
} }
} }
@ -2292,7 +2386,7 @@ static void prim_functionArgs(EvalState & state, const Pos & pos, Value * * args
for (auto & i : args[0]->lambda.fun->formals->formals) { for (auto & i : args[0]->lambda.fun->formals->formals) {
// !!! should optimise booleans (allocate only once) // !!! should optimise booleans (allocate only once)
Value * value = state.allocValue(); Value * value = state.allocValue();
v.attrs->push_back(Attr(i.name, value, &i.pos)); v.attrs->push_back(Attr(i.name, value, ptr(&i.pos)));
mkBool(*value, i.def); mkBool(*value, i.def);
} }
v.attrs->sort(); v.attrs->sort();
@ -2816,7 +2910,12 @@ static void prim_concatMap(EvalState & state, const Pos & pos, Value * * args, V
for (unsigned int n = 0; n < nrLists; ++n) { for (unsigned int n = 0; n < nrLists; ++n) {
Value * vElem = args[1]->listElems()[n]; Value * vElem = args[1]->listElems()[n];
state.callFunction(*args[0], *vElem, lists[n], pos); state.callFunction(*args[0], *vElem, lists[n], pos);
state.forceList(lists[n], pos); try {
state.forceList(lists[n], lists[n].determinePos(args[0]->determinePos(pos)));
} catch (TypeError &e) {
e.addTrace(pos, hintfmt("while invoking '%s'", "concatMap"));
throw;
}
len += lists[n].listSize(); len += lists[n].listSize();
} }
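The try/catch added above attaches a 'while invoking concatMap' trace when the supplied function does not return a list; for example:

    # Type error: the lambda returns an integer, not a list, so forceList
    # fails and the new trace points back at this concatMap call:
    builtins.concatMap (x: x * 2) [ 1 2 3 ]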
@ -3027,7 +3126,7 @@ static RegisterPrimOp primop_toString({
- A path (e.g., `toString /foo/bar` yields `"/foo/bar"`). - A path (e.g., `toString /foo/bar` yields `"/foo/bar"`).
- A set containing `{ __toString = self: ...; }`. - A set containing `{ __toString = self: ...; }` or `{ outPath = ...; }`.
- An integer. - An integer.
@ -3112,7 +3211,7 @@ static void prim_hashString(EvalState & state, const Pos & pos, Value * * args,
PathSet context; // discarded PathSet context; // discarded
string s = state.forceString(*args[1], context, pos); string s = state.forceString(*args[1], context, pos);
mkString(v, hashString(*ht, s).to_string(Base16, false), context); mkString(v, hashString(*ht, s).to_string(Base16, false));
} }
static RegisterPrimOp primop_hashString({ static RegisterPrimOp primop_hashString({
@ -3519,15 +3618,13 @@ static RegisterPrimOp primop_splitVersion({
RegisterPrimOp::PrimOps * RegisterPrimOp::primOps; RegisterPrimOp::PrimOps * RegisterPrimOp::primOps;
RegisterPrimOp::RegisterPrimOp(std::string name, size_t arity, PrimOpFun fun, RegisterPrimOp::RegisterPrimOp(std::string name, size_t arity, PrimOpFun fun)
std::optional<std::string> requiredFeature)
{ {
if (!primOps) primOps = new PrimOps; if (!primOps) primOps = new PrimOps;
primOps->push_back({ primOps->push_back({
.name = name, .name = name,
.args = {}, .args = {},
.arity = arity, .arity = arity,
.requiredFeature = std::move(requiredFeature),
.fun = fun .fun = fun
}); });
} }
@ -3563,9 +3660,7 @@ void EvalState::createBaseEnv()
     if (!evalSettings.pureEval) {
         mkInt(v, time(0));
         addConstant("__currentTime", v);
-    }
-    if (!evalSettings.pureEval) {
         mkString(v, settings.thisSystem.get());
         addConstant("__currentSystem", v);
     }
@ -3603,14 +3698,13 @@ void EvalState::createBaseEnv()
     if (RegisterPrimOp::primOps)
         for (auto & primOp : *RegisterPrimOp::primOps)
-            if (!primOp.requiredFeature || settings.isExperimentalFeatureEnabled(*primOp.requiredFeature))
-                addPrimOp({
-                    .fun = primOp.fun,
-                    .arity = std::max(primOp.args.size(), primOp.arity),
-                    .name = symbols.create(primOp.name),
-                    .args = std::move(primOp.args),
-                    .doc = primOp.doc,
-                });
+            addPrimOp({
+                .fun = primOp.fun,
+                .arity = std::max(primOp.args.size(), primOp.arity),
+                .name = symbols.create(primOp.name),
+                .args = std::move(primOp.args),
+                .doc = primOp.doc,
+            });

     /* Add a wrapper around the derivation primop that computes the
        `drvPath' and `outPath' attributes lazily. */

View file

@ -15,7 +15,6 @@ struct RegisterPrimOp
std::vector<std::string> args; std::vector<std::string> args;
size_t arity = 0; size_t arity = 0;
const char * doc; const char * doc;
std::optional<std::string> requiredFeature;
PrimOpFun fun; PrimOpFun fun;
}; };
@ -28,8 +27,7 @@ struct RegisterPrimOp
RegisterPrimOp( RegisterPrimOp(
std::string name, std::string name,
size_t arity, size_t arity,
PrimOpFun fun, PrimOpFun fun);
std::optional<std::string> requiredFeature = {});
RegisterPrimOp(Info && info); RegisterPrimOp(Info && info);
}; };

View file

@ -62,6 +62,7 @@ static void prim_fetchMercurial(EvalState & state, const Pos & pos, Value * * ar
fetchers::Attrs attrs; fetchers::Attrs attrs;
attrs.insert_or_assign("type", "hg"); attrs.insert_or_assign("type", "hg");
attrs.insert_or_assign("url", url.find("://") != std::string::npos ? url : "file://" + url); attrs.insert_or_assign("url", url.find("://") != std::string::npos ? url : "file://" + url);
attrs.insert_or_assign("name", name);
if (ref) attrs.insert_or_assign("ref", *ref); if (ref) attrs.insert_or_assign("ref", *ref);
if (rev) attrs.insert_or_assign("rev", rev->gitRev()); if (rev) attrs.insert_or_assign("rev", rev->gitRev());
auto input = fetchers::Input::fromAttrs(std::move(attrs)); auto input = fetchers::Input::fromAttrs(std::move(attrs));

View file

@ -7,6 +7,7 @@
#include <ctime> #include <ctime>
#include <iomanip> #include <iomanip>
#include <regex>
namespace nix { namespace nix {
@ -15,7 +16,8 @@ void emitTreeAttrs(
const fetchers::Tree & tree, const fetchers::Tree & tree,
const fetchers::Input & input, const fetchers::Input & input,
Value & v, Value & v,
bool emptyRevFallback) bool emptyRevFallback,
bool forceDirty)
{ {
assert(input.isImmutable()); assert(input.isImmutable());
@ -32,24 +34,28 @@ void emitTreeAttrs(
     mkString(*state.allocAttr(v, state.symbols.create("narHash")),
         narHash->to_string(SRI, true));

-    if (auto rev = input.getRev()) {
-        mkString(*state.allocAttr(v, state.symbols.create("rev")), rev->gitRev());
-        mkString(*state.allocAttr(v, state.symbols.create("shortRev")), rev->gitShortRev());
-    } else if (emptyRevFallback) {
-        // Backwards compat for `builtins.fetchGit`: dirty repos return an empty sha1 as rev
-        auto emptyHash = Hash(htSHA1);
-        mkString(*state.allocAttr(v, state.symbols.create("rev")), emptyHash.gitRev());
-        mkString(*state.allocAttr(v, state.symbols.create("shortRev")), emptyHash.gitShortRev());
-    }
-
     if (input.getType() == "git")
         mkBool(*state.allocAttr(v, state.symbols.create("submodules")),
             fetchers::maybeGetBoolAttr(input.attrs, "submodules").value_or(false));

-    if (auto revCount = input.getRevCount())
-        mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *revCount);
-    else if (emptyRevFallback)
-        mkInt(*state.allocAttr(v, state.symbols.create("revCount")), 0);
+    if (!forceDirty) {
+
+        if (auto rev = input.getRev()) {
+            mkString(*state.allocAttr(v, state.symbols.create("rev")), rev->gitRev());
+            mkString(*state.allocAttr(v, state.symbols.create("shortRev")), rev->gitShortRev());
+        } else if (emptyRevFallback) {
+            // Backwards compat for `builtins.fetchGit`: dirty repos return an empty sha1 as rev
+            auto emptyHash = Hash(htSHA1);
+            mkString(*state.allocAttr(v, state.symbols.create("rev")), emptyHash.gitRev());
+            mkString(*state.allocAttr(v, state.symbols.create("shortRev")), emptyHash.gitShortRev());
+        }
+
+        if (auto revCount = input.getRevCount())
+            mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *revCount);
+        else if (emptyRevFallback)
+            mkInt(*state.allocAttr(v, state.symbols.create("revCount")), 0);
+
+    }

     if (auto lastModified = input.getLastModified()) {
         mkInt(*state.allocAttr(v, state.symbols.create("lastModified")), *lastModified);
@ -60,10 +66,19 @@ void emitTreeAttrs(
v.attrs->sort(); v.attrs->sort();
} }
-std::string fixURI(std::string uri, EvalState &state)
+std::string fixURI(std::string uri, EvalState &state, const std::string & defaultScheme = "file")
 {
     state.checkURI(uri);
-    return uri.find("://") != std::string::npos ? uri : "file://" + uri;
+    return uri.find("://") != std::string::npos ? uri : defaultScheme + "://" + uri;
+}
+
+std::string fixURIForGit(std::string uri, EvalState & state)
+{
+    static std::regex scp_uri("([^/].*)@(.*):(.*)");
+    if (uri[0] != '/' && std::regex_match(uri, scp_uri))
+        return fixURI(std::regex_replace(uri, scp_uri, "$1@$2/$3"), state, "ssh");
+    else
+        return fixURI(uri, state);
 }
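For illustration only (not part of this diff): a minimal, self-contained sketch of the scp-style rewrite that `fixURIForGit` performs, so that `git@host:path` URLs are treated as SSH URLs. The helper name `demoFixURIForGit` and the example URL are assumptions made for this sketch.

```c++
#include <iostream>
#include <regex>
#include <string>

// Mirrors the rewrite above: "user@host:path" becomes "ssh://user@host/path",
// while anything that already carries a scheme (or is an absolute path) is left alone.
std::string demoFixURIForGit(const std::string & uri)
{
    static std::regex scp_uri("([^/].*)@(.*):(.*)");
    if (!uri.empty() && uri[0] != '/' && std::regex_match(uri, scp_uri)) {
        auto rewritten = std::regex_replace(uri, scp_uri, "$1@$2/$3");
        return rewritten.find("://") != std::string::npos ? rewritten : "ssh://" + rewritten;
    }
    return uri.find("://") != std::string::npos ? uri : "file://" + uri;
}

int main()
{
    // Prints "ssh://git@github.com/NixOS/nix.git".
    std::cout << demoFixURIForGit("git@github.com:NixOS/nix.git") << std::endl;
}
```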
void addURI(EvalState &state, fetchers::Attrs &attrs, Symbol name, std::string v) void addURI(EvalState &state, fetchers::Attrs &attrs, Symbol name, std::string v)
@ -72,13 +87,18 @@ void addURI(EvalState &state, fetchers::Attrs &attrs, Symbol name, std::string v
attrs.emplace(name, n == "url" ? fixURI(v, state) : v); attrs.emplace(name, n == "url" ? fixURI(v, state) : v);
} }
struct FetchTreeParams {
bool emptyRevFallback = false;
bool allowNameArgument = false;
};
static void fetchTree( static void fetchTree(
EvalState &state, EvalState &state,
const Pos &pos, const Pos &pos,
Value **args, Value **args,
Value &v, Value &v,
const std::optional<std::string> type, const std::optional<std::string> type,
bool emptyRevFallback = false const FetchTreeParams & params = FetchTreeParams{}
) { ) {
fetchers::Input input; fetchers::Input input;
PathSet context; PathSet context;
@ -119,17 +139,25 @@ static void fetchTree(
.errPos = pos .errPos = pos
}); });
if (!params.allowNameArgument)
if (auto nameIter = attrs.find("name"); nameIter != attrs.end())
throw Error({
.msg = hintfmt("attribute 'name' isnt supported in call to 'fetchTree'"),
.errPos = pos
});
input = fetchers::Input::fromAttrs(std::move(attrs)); input = fetchers::Input::fromAttrs(std::move(attrs));
} else { } else {
auto url = fixURI(state.coerceToString(pos, *args[0], context, false, false), state); auto url = state.coerceToString(pos, *args[0], context, false, false);
if (type == "git") { if (type == "git") {
fetchers::Attrs attrs; fetchers::Attrs attrs;
attrs.emplace("type", "git"); attrs.emplace("type", "git");
attrs.emplace("url", url); attrs.emplace("url", fixURIForGit(url, state));
input = fetchers::Input::fromAttrs(std::move(attrs)); input = fetchers::Input::fromAttrs(std::move(attrs));
} else { } else {
input = fetchers::Input::fromURL(url); input = fetchers::Input::fromURL(fixURI(url, state));
} }
} }
@ -144,13 +172,13 @@ static void fetchTree(
if (state.allowedPaths) if (state.allowedPaths)
state.allowedPaths->insert(tree.actualPath); state.allowedPaths->insert(tree.actualPath);
emitTreeAttrs(state, tree, input2, v, emptyRevFallback); emitTreeAttrs(state, tree, input2, v, params.emptyRevFallback, false);
} }
static void prim_fetchTree(EvalState & state, const Pos & pos, Value * * args, Value & v) static void prim_fetchTree(EvalState & state, const Pos & pos, Value * * args, Value & v)
{ {
settings.requireExperimentalFeature("flakes"); settings.requireExperimentalFeature("flakes");
fetchTree(state, pos, args, v, std::nullopt); fetchTree(state, pos, args, v, std::nullopt, FetchTreeParams { .allowNameArgument = false });
} }
// FIXME: document // FIXME: document
@ -206,20 +234,21 @@ static void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v,
? fetchers::downloadTarball(state.store, *url, name, (bool) expectedHash).first.storePath ? fetchers::downloadTarball(state.store, *url, name, (bool) expectedHash).first.storePath
: fetchers::downloadFile(state.store, *url, name, (bool) expectedHash).storePath; : fetchers::downloadFile(state.store, *url, name, (bool) expectedHash).storePath;
auto path = state.store->toRealPath(storePath); auto realPath = state.store->toRealPath(storePath);
if (expectedHash) { if (expectedHash) {
auto hash = unpack auto hash = unpack
? state.store->queryPathInfo(storePath)->narHash ? state.store->queryPathInfo(storePath)->narHash
: hashFile(htSHA256, path); : hashFile(htSHA256, realPath);
if (hash != *expectedHash) if (hash != *expectedHash)
throw Error((unsigned int) 102, "hash mismatch in file downloaded from '%s':\n specified: %s\n got: %s", throw Error((unsigned int) 102, "hash mismatch in file downloaded from '%s':\n specified: %s\n got: %s",
*url, expectedHash->to_string(Base32, true), hash.to_string(Base32, true)); *url, expectedHash->to_string(Base32, true), hash.to_string(Base32, true));
} }
if (state.allowedPaths) if (state.allowedPaths)
state.allowedPaths->insert(path); state.allowedPaths->insert(realPath);
auto path = state.store->printStorePath(storePath);
mkString(v, path, PathSet({path})); mkString(v, path, PathSet({path}));
} }
@ -292,7 +321,7 @@ static RegisterPrimOp primop_fetchTarball({
static void prim_fetchGit(EvalState &state, const Pos &pos, Value **args, Value &v) static void prim_fetchGit(EvalState &state, const Pos &pos, Value **args, Value &v)
{ {
fetchTree(state, pos, args, v, "git", true); fetchTree(state, pos, args, v, "git", FetchTreeParams { .emptyRevFallback = true, .allowNameArgument = true });
} }
static RegisterPrimOp primop_fetchGit({ static RegisterPrimOp primop_fetchGit({
@ -303,17 +332,17 @@ static RegisterPrimOp primop_fetchGit({
of the repo at that URL is fetched. Otherwise, it can be an of the repo at that URL is fetched. Otherwise, it can be an
attribute with the following attributes (all except `url` optional): attribute with the following attributes (all except `url` optional):
- url - url\
The URL of the repo. The URL of the repo.
- name - name\
The name of the directory the repo should be exported to in the The name of the directory the repo should be exported to in the
store. Defaults to the basename of the URL. store. Defaults to the basename of the URL.
- rev - rev\
The git revision to fetch. Defaults to the tip of `ref`. The git revision to fetch. Defaults to the tip of `ref`.
- ref - ref\
The git ref to look for the requested revision under. This is The git ref to look for the requested revision under. This is
often a branch or tag name. Defaults to `HEAD`. often a branch or tag name. Defaults to `HEAD`.
@ -321,11 +350,11 @@ static RegisterPrimOp primop_fetchGit({
of Nix 2.3.0 Nix will not prefix `refs/heads/` if `ref` starts of Nix 2.3.0 Nix will not prefix `refs/heads/` if `ref` starts
with `refs/`. with `refs/`.
- submodules - submodules\
A Boolean parameter that specifies whether submodules should be A Boolean parameter that specifies whether submodules should be
checked out. Defaults to `false`. checked out. Defaults to `false`.
- allRefs - allRefs\
Whether to fetch all refs of the repository. With this argument being Whether to fetch all refs of the repository. With this argument being
true, it's possible to load a `rev` from *any* `ref` (by default only true, it's possible to load a `rev` from *any* `ref` (by default only
`rev`s from the specified `ref` are supported). `rev`s from the specified `ref` are supported).

View file

@ -42,7 +42,7 @@ static void showAttrs(EvalState & state, bool strict, bool location,
XMLAttrs xmlAttrs; XMLAttrs xmlAttrs;
xmlAttrs["name"] = i; xmlAttrs["name"] = i;
if (location && a.pos != &noPos) posToXML(xmlAttrs, *a.pos); if (location && a.pos != ptr(&noPos)) posToXML(xmlAttrs, *a.pos);
XMLOpenElement _(doc, "attr", xmlAttrs); XMLOpenElement _(doc, "attr", xmlAttrs);
printValueAsXML(state, strict, location, printValueAsXML(state, strict, location,

View file

@ -341,6 +341,8 @@ public:
return internalType == tList1 ? 1 : internalType == tList2 ? 2 : bigList.size; return internalType == tList1 ? 1 : internalType == tList2 ? 2 : bigList.size;
} }
Pos determinePos(const Pos &pos) const;
/* Check whether forcing this value requires a trivial amount of /* Check whether forcing this value requires a trivial amount of
computation. In particular, function applications are computation. In particular, function applications are
non-trivial. */ non-trivial. */

View file

@ -6,6 +6,8 @@
#include <nlohmann/json_fwd.hpp> #include <nlohmann/json_fwd.hpp>
#include <optional>
namespace nix::fetchers { namespace nix::fetchers {
typedef std::variant<std::string, uint64_t, Explicit<bool>> Attr; typedef std::variant<std::string, uint64_t, Explicit<bool>> Attr;

View file

@ -200,12 +200,17 @@ void Input::markChangedFile(
return scheme->markChangedFile(*this, file, commitMsg); return scheme->markChangedFile(*this, file, commitMsg);
} }
std::string Input::getName() const
{
return maybeGetStrAttr(attrs, "name").value_or("source");
}
StorePath Input::computeStorePath(Store & store) const StorePath Input::computeStorePath(Store & store) const
{ {
auto narHash = getNarHash(); auto narHash = getNarHash();
if (!narHash) if (!narHash)
throw Error("cannot compute store path for mutable input '%s'", to_string()); throw Error("cannot compute store path for mutable input '%s'", to_string());
return store.makeFixedOutputPath("source", FixedOutputInfo { return store.makeFixedOutputPath(getName(), FixedOutputInfo {
{ {
.method = FileIngestionMethod::Recursive, .method = FileIngestionMethod::Recursive,
.hash = *narHash, .hash = *narHash,

View file

@ -38,6 +38,9 @@ struct Input
bool immutable = false; bool immutable = false;
bool direct = true; bool direct = true;
/* path of the parent of this input, used for relative path resolution */
std::optional<Path> parent;
public: public:
static Input fromURL(const std::string & url); static Input fromURL(const std::string & url);
@ -81,6 +84,8 @@ public:
std::string_view file, std::string_view file,
std::optional<std::string> commitMsg) const; std::optional<std::string> commitMsg) const;
std::string getName() const;
StorePath computeStorePath(Store & store) const; StorePath computeStorePath(Store & store) const;
// Convenience functions for common attributes. // Convenience functions for common attributes.
@ -145,13 +150,7 @@ DownloadFileResult downloadFile(
bool immutable, bool immutable,
const Headers & headers = {}); const Headers & headers = {});
-struct DownloadTarballMeta
-{
-    time_t lastModified;
-    std::string effectiveUrl;
-};
-
-std::pair<Tree, DownloadTarballMeta> downloadTarball(
+std::pair<Tree, time_t> downloadTarball(
     ref<Store> store,
     const std::string & url,
     const std::string & name,

View file

@ -4,13 +4,21 @@
#include "tarfile.hh" #include "tarfile.hh"
#include "store-api.hh" #include "store-api.hh"
#include "url-parts.hh" #include "url-parts.hh"
#include "pathlocks.hh"
#include <sys/time.h> #include <sys/time.h>
#include <sys/wait.h>
using namespace std::string_literals; using namespace std::string_literals;
namespace nix::fetchers { namespace nix::fetchers {
// Explicit initial branch of our bare repo to suppress warnings from new version of git.
// The value itself does not matter, since we always fetch a specific revision or branch.
// It is set with `-c init.defaultBranch=` instead of `--initial-branch=` to stay compatible with
// old version of git, which will ignore unrecognized `-c` options.
const std::string gitInitialBranch = "__nix_dummy_branch";
static std::string readHead(const Path & path) static std::string readHead(const Path & path)
{ {
return chomp(runProgram("git", true, { "-C", path, "rev-parse", "--abbrev-ref", "HEAD" })); return chomp(runProgram("git", true, { "-C", path, "rev-parse", "--abbrev-ref", "HEAD" }));
@ -59,7 +67,7 @@ struct GitInputScheme : InputScheme
if (maybeGetStrAttr(attrs, "type") != "git") return {}; if (maybeGetStrAttr(attrs, "type") != "git") return {};
for (auto & [name, value] : attrs) for (auto & [name, value] : attrs)
if (name != "type" && name != "url" && name != "ref" && name != "rev" && name != "shallow" && name != "submodules" && name != "lastModified" && name != "revCount" && name != "narHash" && name != "allRefs") if (name != "type" && name != "url" && name != "ref" && name != "rev" && name != "shallow" && name != "submodules" && name != "lastModified" && name != "revCount" && name != "narHash" && name != "allRefs" && name != "name")
throw Error("unsupported Git input attribute '%s'", name); throw Error("unsupported Git input attribute '%s'", name);
parseURL(getStrAttr(attrs, "url")); parseURL(getStrAttr(attrs, "url"));
@ -166,10 +174,10 @@ struct GitInputScheme : InputScheme
std::pair<Tree, Input> fetch(ref<Store> store, const Input & _input) override std::pair<Tree, Input> fetch(ref<Store> store, const Input & _input) override
{ {
auto name = "source";
Input input(_input); Input input(_input);
std::string name = input.getName();
bool shallow = maybeGetBoolAttr(input.attrs, "shallow").value_or(false); bool shallow = maybeGetBoolAttr(input.attrs, "shallow").value_or(false);
bool submodules = maybeGetBoolAttr(input.attrs, "submodules").value_or(false); bool submodules = maybeGetBoolAttr(input.attrs, "submodules").value_or(false);
bool allRefs = maybeGetBoolAttr(input.attrs, "allRefs").value_or(false); bool allRefs = maybeGetBoolAttr(input.attrs, "allRefs").value_or(false);
@ -269,7 +277,7 @@ struct GitInputScheme : InputScheme
return files.count(file); return files.count(file);
}; };
auto storePath = store->addToStore("source", actualUrl, FileIngestionMethod::Recursive, htSHA256, filter); auto storePath = store->addToStore(input.getName(), actualUrl, FileIngestionMethod::Recursive, htSHA256, filter);
// FIXME: maybe we should use the timestamp of the last // FIXME: maybe we should use the timestamp of the last
// modified dirty file? // modified dirty file?
@ -316,11 +324,17 @@ struct GitInputScheme : InputScheme
Path cacheDir = getCacheDir() + "/nix/gitv3/" + hashString(htSHA256, actualUrl).to_string(Base32, false); Path cacheDir = getCacheDir() + "/nix/gitv3/" + hashString(htSHA256, actualUrl).to_string(Base32, false);
repoDir = cacheDir; repoDir = cacheDir;
Path cacheDirLock = cacheDir + ".lock";
createDirs(dirOf(cacheDir));
AutoCloseFD lock = openLockFile(cacheDirLock, true);
lockFile(lock.get(), ltWrite, true);
if (!pathExists(cacheDir)) { if (!pathExists(cacheDir)) {
createDirs(dirOf(cacheDir)); runProgram("git", true, { "-c", "init.defaultBranch=" + gitInitialBranch, "init", "--bare", repoDir });
runProgram("git", true, { "init", "--bare", repoDir });
} }
deleteLockFile(cacheDirLock, lock.get());
Path localRefFile = Path localRefFile =
input.getRef()->compare(0, 5, "refs/") == 0 input.getRef()->compare(0, 5, "refs/") == 0
? cacheDir + "/" + *input.getRef() ? cacheDir + "/" + *input.getRef()
@ -405,17 +419,14 @@ struct GitInputScheme : InputScheme
AutoDelete delTmpDir(tmpDir, true); AutoDelete delTmpDir(tmpDir, true);
PathFilter filter = defaultPathFilter; PathFilter filter = defaultPathFilter;
-                RunOptions checkCommitOpts(
-                    "git",
-                    { "-C", repoDir, "cat-file", "commit", input.getRev()->gitRev() }
-                );
-                checkCommitOpts.searchPath = true;
-                checkCommitOpts.mergeStderrToStdout = true;
-
-                auto result = runProgram(checkCommitOpts);
+                auto result = runProgram(RunOptions {
+                    .program = "git",
+                    .args = { "-C", repoDir, "cat-file", "commit", input.getRev()->gitRev() },
+                    .mergeStderrToStdout = true
+                });
+
                 if (WEXITSTATUS(result.first) == 128
-                    && result.second.find("bad file") != std::string::npos
-                ) {
+                    && result.second.find("bad file") != std::string::npos)
+                {
throw Error( throw Error(
"Cannot find Git revision '%s' in ref '%s' of repository '%s'! " "Cannot find Git revision '%s' in ref '%s' of repository '%s'! "
"Please make sure that the " ANSI_BOLD "rev" ANSI_NORMAL " exists on the " "Please make sure that the " ANSI_BOLD "rev" ANSI_NORMAL " exists on the "
@ -431,7 +442,7 @@ struct GitInputScheme : InputScheme
Path tmpGitDir = createTempDir(); Path tmpGitDir = createTempDir();
AutoDelete delTmpGitDir(tmpGitDir, true); AutoDelete delTmpGitDir(tmpGitDir, true);
runProgram("git", true, { "init", tmpDir, "--separate-git-dir", tmpGitDir }); runProgram("git", true, { "-c", "init.defaultBranch=" + gitInitialBranch, "init", tmpDir, "--separate-git-dir", tmpGitDir });
// TODO: repoDir might lack the ref (it only checks if rev // TODO: repoDir might lack the ref (it only checks if rev
// exists, see FIXME above) so use a big hammer and fetch // exists, see FIXME above) so use a big hammer and fetch
// everything to ensure we get the rev. // everything to ensure we get the rev.
@ -447,9 +458,11 @@ struct GitInputScheme : InputScheme
// FIXME: should pipe this, or find some better way to extract a // FIXME: should pipe this, or find some better way to extract a
// revision. // revision.
auto source = sinkToSource([&](Sink & sink) { auto source = sinkToSource([&](Sink & sink) {
RunOptions gitOptions("git", { "-C", repoDir, "archive", input.getRev()->gitRev() }); runProgram2({
gitOptions.standardOut = &sink; .program = "git",
runProgram2(gitOptions); .args = { "-C", repoDir, "archive", input.getRev()->gitRev() },
.standardOut = &sink
});
}); });
unpackTarfile(*source, tmpDir); unpackTarfile(*source, tmpDir);
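The two call sites above switch to constructing run options with C++20 designated initializers. For illustration only, here is a self-contained sketch of that call style; `DemoRunOptions` is a stand-in type invented for this sketch, not Nix's real `RunOptions`.

```c++
#include <iostream>
#include <string>
#include <vector>

// Stand-in struct, just to show the designated-initializer style used above.
struct DemoRunOptions {
    std::string program;
    std::vector<std::string> args;
    bool mergeStderrToStdout = false;
};

int main()
{
    DemoRunOptions opts {
        .program = "git",
        .args = { "-C", ".", "cat-file", "commit", "HEAD" },
        .mergeStderrToStdout = true,
    };
    std::cout << opts.program << " called with " << opts.args.size() << " arguments\n";
}
```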

View file

@ -207,16 +207,16 @@ struct GitArchiveInputScheme : InputScheme
auto url = getDownloadUrl(input); auto url = getDownloadUrl(input);
auto [tree, meta] = downloadTarball(store, url.url, "source", true, url.headers); auto [tree, lastModified] = downloadTarball(store, url.url, input.getName(), true, url.headers);
input.attrs.insert_or_assign("lastModified", uint64_t(meta.lastModified)); input.attrs.insert_or_assign("lastModified", uint64_t(lastModified));
getCache()->add( getCache()->add(
store, store,
immutableAttrs, immutableAttrs,
{ {
{"rev", rev->gitRev()}, {"rev", rev->gitRev()},
{"lastModified", uint64_t(meta.lastModified)} {"lastModified", uint64_t(lastModified)}
}, },
tree.storePath, tree.storePath,
true); true);
@ -273,9 +273,9 @@ struct GitHubInputScheme : GitArchiveInputScheme
void clone(const Input & input, const Path & destDir) override void clone(const Input & input, const Path & destDir) override
{ {
auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com"); auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com");
Input::fromURL(fmt("git+ssh://git@%s/%s/%s.git", Input::fromURL(fmt("git+https://%s/%s/%s.git",
host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"))) host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo")))
.applyOverrides(input.getRef().value_or("HEAD"), input.getRev()) .applyOverrides(input.getRef(), input.getRev())
.clone(destDir); .clone(destDir);
} }
}; };
@ -341,9 +341,9 @@ struct GitLabInputScheme : GitArchiveInputScheme
{ {
auto host = maybeGetStrAttr(input.attrs, "host").value_or("gitlab.com"); auto host = maybeGetStrAttr(input.attrs, "host").value_or("gitlab.com");
// FIXME: get username somewhere // FIXME: get username somewhere
Input::fromURL(fmt("git+ssh://git@%s/%s/%s.git", Input::fromURL(fmt("git+https://%s/%s/%s.git",
host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"))) host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo")))
.applyOverrides(input.getRef().value_or("HEAD"), input.getRev()) .applyOverrides(input.getRef(), input.getRev())
.clone(destDir); .clone(destDir);
} }
}; };

View file

@ -8,4 +8,6 @@ libfetchers_SOURCES := $(wildcard $(d)/*.cc)
libfetchers_CXXFLAGS += -I src/libutil -I src/libstore libfetchers_CXXFLAGS += -I src/libutil -I src/libstore
libfetchers_LDFLAGS += -pthread
libfetchers_LIBS = libutil libstore libfetchers_LIBS = libutil libstore
