forked from lix-project/lix
Merge branch 'master' into debug-merge
This commit is contained in:
commit
64c4ba8f66
2
.github/STALE-BOT.md
vendored
2
.github/STALE-BOT.md
vendored
|
@ -3,7 +3,7 @@
|
|||
- Thanks for your contribution!
|
||||
- To remove the stale label, just leave a new comment.
|
||||
- _How to find the right people to ping?_ → [`git blame`](https://git-scm.com/docs/git-blame) to the rescue! (or GitHub's history and blame buttons.)
|
||||
- You can always ask for help on [our Discourse Forum](https://discourse.nixos.org/) or on the [#nixos IRC channel](https://webchat.freenode.net/#nixos).
|
||||
- You can always ask for help on [our Discourse Forum](https://discourse.nixos.org/) or on [Matrix - #nix:nixos.org](https://matrix.to/#/#nix:nixos.org).
|
||||
|
||||
## Suggestions for PRs
|
||||
|
||||
|
|
26
.github/workflows/backport.yml
vendored
Normal file
26
.github/workflows/backport.yml
vendored
Normal file
|
@ -0,0 +1,26 @@
|
|||
name: Backport
|
||||
on:
|
||||
pull_request_target:
|
||||
types: [closed, labeled]
|
||||
jobs:
|
||||
backport:
|
||||
name: Backport Pull Request
|
||||
if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith('backport', github.event.label.name))
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
# required to find all branches
|
||||
fetch-depth: 0
|
||||
- name: Create backport PRs
|
||||
# should be kept in sync with `version`
|
||||
uses: zeebe-io/backport-action@v0.0.7
|
||||
with:
|
||||
# Config README: https://github.com/zeebe-io/backport-action#backport-action
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
github_workspace: ${{ github.workspace }}
|
||||
pull_description: |-
|
||||
Bot-based backport to `${target_branch}`, triggered by a label in #${pull_number}.
|
||||
# should be kept in sync with `uses`
|
||||
version: v0.0.5
|
25
.github/workflows/test.yml
vendored
25
.github/workflows/test.yml
vendored
|
@ -1,27 +1,32 @@
|
|||
name: "Test"
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
|
||||
jobs:
|
||||
|
||||
tests:
|
||||
needs: [check_cachix]
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-latest, macos-latest]
|
||||
runs-on: ${{ matrix.os }}
|
||||
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v2.3.4
|
||||
- uses: actions/checkout@v2.4.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: cachix/install-nix-action@v13
|
||||
- uses: cachix/install-nix-action@v16
|
||||
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
|
||||
- uses: cachix/cachix-action@v10
|
||||
if: needs.check_cachix.outputs.secret == 'true'
|
||||
with:
|
||||
name: '${{ env.CACHIX_NAME }}'
|
||||
signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
|
||||
authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
|
||||
#- run: nix flake check
|
||||
- run: nix-build -A checks.$(if [[ `uname` = Linux ]]; then echo x86_64-linux; else echo x86_64-darwin; fi)
|
||||
- run: nix-build -A checks.$(nix-instantiate --eval -E '(builtins.currentSystem)')
|
||||
|
||||
check_cachix:
|
||||
name: Cachix secret present for installer tests
|
||||
runs-on: ubuntu-latest
|
||||
|
@ -33,6 +38,7 @@ jobs:
|
|||
env:
|
||||
_CACHIX_SECRETS: ${{ secrets.CACHIX_SIGNING_KEY }}${{ secrets.CACHIX_AUTH_TOKEN }}
|
||||
run: echo "::set-output name=secret::${{ env._CACHIX_SECRETS != '' }}"
|
||||
|
||||
installer:
|
||||
needs: [tests, check_cachix]
|
||||
if: github.event_name == 'push' && needs.check_cachix.outputs.secret == 'true'
|
||||
|
@ -40,11 +46,11 @@ jobs:
|
|||
outputs:
|
||||
installerURL: ${{ steps.prepare-installer.outputs.installerURL }}
|
||||
steps:
|
||||
- uses: actions/checkout@v2.3.4
|
||||
- uses: actions/checkout@v2.4.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
|
||||
- uses: cachix/install-nix-action@v13
|
||||
- uses: cachix/install-nix-action@v16
|
||||
- uses: cachix/cachix-action@v10
|
||||
with:
|
||||
name: '${{ env.CACHIX_NAME }}'
|
||||
|
@ -52,6 +58,7 @@ jobs:
|
|||
authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
|
||||
- id: prepare-installer
|
||||
run: scripts/prepare-installer-for-github-actions
|
||||
|
||||
installer_test:
|
||||
needs: [installer, check_cachix]
|
||||
if: github.event_name == 'push' && needs.check_cachix.outputs.secret == 'true'
|
||||
|
@ -60,9 +67,9 @@ jobs:
|
|||
os: [ubuntu-latest, macos-latest]
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
- uses: actions/checkout@v2.3.4
|
||||
- uses: actions/checkout@v2.4.0
|
||||
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
|
||||
- uses: cachix/install-nix-action@v13
|
||||
- uses: cachix/install-nix-action@v16
|
||||
with:
|
||||
install_url: '${{needs.installer.outputs.installerURL}}'
|
||||
install_options: "--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve"
|
||||
|
|
9
.gitignore
vendored
9
.gitignore
vendored
|
@ -15,6 +15,7 @@ perl/Makefile.config
|
|||
/doc/manual/*.1
|
||||
/doc/manual/*.5
|
||||
/doc/manual/*.8
|
||||
/doc/manual/generated/*
|
||||
/doc/manual/nix.json
|
||||
/doc/manual/conf-file.json
|
||||
/doc/manual/builtins.json
|
||||
|
@ -25,8 +26,6 @@ perl/Makefile.config
|
|||
|
||||
# /scripts/
|
||||
/scripts/nix-profile.sh
|
||||
/scripts/nix-reduce-build
|
||||
/scripts/nix-http-export.cgi
|
||||
/scripts/nix-profile-daemon.sh
|
||||
|
||||
# /src/libexpr/
|
||||
|
@ -39,6 +38,7 @@ perl/Makefile.config
|
|||
|
||||
# /src/libstore/
|
||||
*.gen.*
|
||||
/src/libstore/tests/libstore-tests
|
||||
|
||||
# /src/libutil/
|
||||
/src/libutil/tests/libutil-tests
|
||||
|
@ -56,9 +56,6 @@ perl/Makefile.config
|
|||
|
||||
/src/nix-prefetch-url/nix-prefetch-url
|
||||
|
||||
# /src/nix-daemon/
|
||||
/src/nix-daemon/nix-daemon
|
||||
|
||||
/src/nix-collect-garbage/nix-collect-garbage
|
||||
|
||||
# /src/nix-channel/
|
||||
|
@ -76,12 +73,12 @@ perl/Makefile.config
|
|||
# /tests/
|
||||
/tests/test-tmp
|
||||
/tests/common.sh
|
||||
/tests/dummy
|
||||
/tests/result*
|
||||
/tests/restricted-innocent
|
||||
/tests/shell
|
||||
/tests/shell.drv
|
||||
/tests/config.nix
|
||||
/tests/ca/config.nix
|
||||
|
||||
# /tests/lang/
|
||||
/tests/lang/*.out
|
||||
|
|
5
Makefile
5
Makefile
|
@ -4,6 +4,7 @@ makefiles = \
|
|||
src/libutil/local.mk \
|
||||
src/libutil/tests/local.mk \
|
||||
src/libstore/local.mk \
|
||||
src/libstore/tests/local.mk \
|
||||
src/libfetchers/local.mk \
|
||||
src/libmain/local.mk \
|
||||
src/libexpr/local.mk \
|
||||
|
@ -12,6 +13,8 @@ makefiles = \
|
|||
src/resolve-system-dependencies/local.mk \
|
||||
scripts/local.mk \
|
||||
misc/bash/local.mk \
|
||||
misc/fish/local.mk \
|
||||
misc/zsh/local.mk \
|
||||
misc/systemd/local.mk \
|
||||
misc/launchd/local.mk \
|
||||
misc/upstart/local.mk \
|
||||
|
@ -32,4 +35,4 @@ endif
|
|||
include mk/lib.mk
|
||||
|
||||
# GLOBAL_CXXFLAGS += -g -Wall -include config.h -std=c++17 -fstack-usage
|
||||
GLOBAL_CXXFLAGS += -g -Wall -include config.h -std=c++17
|
||||
GLOBAL_CXXFLAGS += -g -Wall -include config.h -std=c++17 -I src
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
HOST_OS = @host_os@
|
||||
AR = @AR@
|
||||
BDW_GC_LIBS = @BDW_GC_LIBS@
|
||||
BOOST_LDFLAGS = @BOOST_LDFLAGS@
|
||||
|
|
|
@ -28,7 +28,8 @@ build nix from source with nix-build or how to get a development environment.
|
|||
- [Nix manual](https://nixos.org/nix/manual)
|
||||
- [Nix jobsets on hydra.nixos.org](https://hydra.nixos.org/project/nix)
|
||||
- [NixOS Discourse](https://discourse.nixos.org/)
|
||||
- [IRC - #nixos on freenode.net](irc://irc.freenode.net/#nixos)
|
||||
- [Matrix - #nix:nixos.org](https://matrix.to/#/#nix:nixos.org)
|
||||
- [IRC - #nixos on libera.chat](irc://irc.libera.chat/#nixos)
|
||||
|
||||
## License
|
||||
|
||||
|
|
45
boehmgc-coroutine-sp-fallback.diff
Normal file
45
boehmgc-coroutine-sp-fallback.diff
Normal file
|
@ -0,0 +1,45 @@
|
|||
diff --git a/pthread_stop_world.c b/pthread_stop_world.c
|
||||
index 4b2c429..1fb4c52 100644
|
||||
--- a/pthread_stop_world.c
|
||||
+++ b/pthread_stop_world.c
|
||||
@@ -673,6 +673,8 @@ GC_INNER void GC_push_all_stacks(void)
|
||||
struct GC_traced_stack_sect_s *traced_stack_sect;
|
||||
pthread_t self = pthread_self();
|
||||
word total_size = 0;
|
||||
+ size_t stack_limit;
|
||||
+ pthread_attr_t pattr;
|
||||
|
||||
if (!EXPECT(GC_thr_initialized, TRUE))
|
||||
GC_thr_init();
|
||||
@@ -722,6 +724,31 @@ GC_INNER void GC_push_all_stacks(void)
|
||||
hi = p->altstack + p->altstack_size;
|
||||
/* FIXME: Need to scan the normal stack too, but how ? */
|
||||
/* FIXME: Assume stack grows down */
|
||||
+ } else {
|
||||
+ if (pthread_getattr_np(p->id, &pattr)) {
|
||||
+ ABORT("GC_push_all_stacks: pthread_getattr_np failed!");
|
||||
+ }
|
||||
+ if (pthread_attr_getstacksize(&pattr, &stack_limit)) {
|
||||
+ ABORT("GC_push_all_stacks: pthread_attr_getstacksize failed!");
|
||||
+ }
|
||||
+ if (pthread_attr_destroy(&pattr)) {
|
||||
+ ABORT("GC_push_all_stacks: pthread_attr_destroy failed!");
|
||||
+ }
|
||||
+ // When a thread goes into a coroutine, we lose its original sp until
|
||||
+ // control flow returns to the thread.
|
||||
+ // While in the coroutine, the sp points outside the thread stack,
|
||||
+ // so we can detect this and push the entire thread stack instead,
|
||||
+ // as an approximation.
|
||||
+ // We assume that the coroutine has similarly added its entire stack.
|
||||
+ // This could be made accurate by cooperating with the application
|
||||
+ // via new functions and/or callbacks.
|
||||
+ #ifndef STACK_GROWS_UP
|
||||
+ if (lo >= hi || lo < hi - stack_limit) { // sp outside stack
|
||||
+ lo = hi - stack_limit;
|
||||
+ }
|
||||
+ #else
|
||||
+ #error "STACK_GROWS_UP not supported in boost_coroutine2 (as of june 2021), so we don't support it in Nix."
|
||||
+ #endif
|
||||
}
|
||||
GC_push_all_stack_sections(lo, hi, traced_stack_sect);
|
||||
# ifdef STACK_GROWS_UP
|
20
config/config.guess
vendored
20
config/config.guess
vendored
|
@ -1,8 +1,8 @@
|
|||
#! /bin/sh
|
||||
# Attempt to guess a canonical system name.
|
||||
# Copyright 1992-2020 Free Software Foundation, Inc.
|
||||
# Copyright 1992-2021 Free Software Foundation, Inc.
|
||||
|
||||
timestamp='2020-11-19'
|
||||
timestamp='2021-01-25'
|
||||
|
||||
# This file is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License as published by
|
||||
|
@ -50,7 +50,7 @@ version="\
|
|||
GNU config.guess ($timestamp)
|
||||
|
||||
Originally written by Per Bothner.
|
||||
Copyright 1992-2020 Free Software Foundation, Inc.
|
||||
Copyright 1992-2021 Free Software Foundation, Inc.
|
||||
|
||||
This is free software; see the source for copying conditions. There is NO
|
||||
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
|
||||
|
@ -188,10 +188,9 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
|
|||
#
|
||||
# Note: NetBSD doesn't particularly care about the vendor
|
||||
# portion of the name. We always set it to "unknown".
|
||||
sysctl="sysctl -n hw.machine_arch"
|
||||
UNAME_MACHINE_ARCH=$( (uname -p 2>/dev/null || \
|
||||
"/sbin/$sysctl" 2>/dev/null || \
|
||||
"/usr/sbin/$sysctl" 2>/dev/null || \
|
||||
/sbin/sysctl -n hw.machine_arch 2>/dev/null || \
|
||||
/usr/sbin/sysctl -n hw.machine_arch 2>/dev/null || \
|
||||
echo unknown))
|
||||
case "$UNAME_MACHINE_ARCH" in
|
||||
aarch64eb) machine=aarch64_be-unknown ;;
|
||||
|
@ -996,6 +995,9 @@ EOF
|
|||
k1om:Linux:*:*)
|
||||
echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
|
||||
exit ;;
|
||||
loongarch32:Linux:*:* | loongarch64:Linux:*:* | loongarchx32:Linux:*:*)
|
||||
echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
|
||||
exit ;;
|
||||
m32r*:Linux:*:*)
|
||||
echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
|
||||
exit ;;
|
||||
|
@ -1084,7 +1086,7 @@ EOF
|
|||
ppcle:Linux:*:*)
|
||||
echo powerpcle-unknown-linux-"$LIBC"
|
||||
exit ;;
|
||||
riscv32:Linux:*:* | riscv64:Linux:*:*)
|
||||
riscv32:Linux:*:* | riscv32be:Linux:*:* | riscv64:Linux:*:* | riscv64be:Linux:*:*)
|
||||
echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
|
||||
exit ;;
|
||||
s390:Linux:*:* | s390x:Linux:*:*)
|
||||
|
@ -1480,8 +1482,8 @@ EOF
|
|||
i*86:rdos:*:*)
|
||||
echo "$UNAME_MACHINE"-pc-rdos
|
||||
exit ;;
|
||||
i*86:AROS:*:*)
|
||||
echo "$UNAME_MACHINE"-pc-aros
|
||||
*:AROS:*:*)
|
||||
echo "$UNAME_MACHINE"-unknown-aros
|
||||
exit ;;
|
||||
x86_64:VMkernel:*:*)
|
||||
echo "$UNAME_MACHINE"-unknown-esx
|
||||
|
|
20
config/config.sub
vendored
20
config/config.sub
vendored
|
@ -1,8 +1,8 @@
|
|||
#! /bin/sh
|
||||
# Configuration validation subroutine script.
|
||||
# Copyright 1992-2020 Free Software Foundation, Inc.
|
||||
# Copyright 1992-2021 Free Software Foundation, Inc.
|
||||
|
||||
timestamp='2020-12-02'
|
||||
timestamp='2021-01-08'
|
||||
|
||||
# This file is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License as published by
|
||||
|
@ -67,7 +67,7 @@ Report bugs and patches to <config-patches@gnu.org>."
|
|||
version="\
|
||||
GNU config.sub ($timestamp)
|
||||
|
||||
Copyright 1992-2020 Free Software Foundation, Inc.
|
||||
Copyright 1992-2021 Free Software Foundation, Inc.
|
||||
|
||||
This is free software; see the source for copying conditions. There is NO
|
||||
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
|
||||
|
@ -1185,6 +1185,7 @@ case $cpu-$vendor in
|
|||
| k1om \
|
||||
| le32 | le64 \
|
||||
| lm32 \
|
||||
| loongarch32 | loongarch64 | loongarchx32 \
|
||||
| m32c | m32r | m32rle \
|
||||
| m5200 | m68000 | m680[012346]0 | m68360 | m683?2 | m68k \
|
||||
| m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x \
|
||||
|
@ -1229,7 +1230,7 @@ case $cpu-$vendor in
|
|||
| powerpc | powerpc64 | powerpc64le | powerpcle | powerpcspe \
|
||||
| pru \
|
||||
| pyramid \
|
||||
| riscv | riscv32 | riscv64 \
|
||||
| riscv | riscv32 | riscv32be | riscv64 | riscv64be \
|
||||
| rl78 | romp | rs6000 | rx \
|
||||
| s390 | s390x \
|
||||
| score \
|
||||
|
@ -1682,11 +1683,14 @@ fi
|
|||
|
||||
# Now, validate our (potentially fixed-up) OS.
|
||||
case $os in
|
||||
# Sometimes we do "kernel-abi", so those need to count as OSes.
|
||||
# Sometimes we do "kernel-libc", so those need to count as OSes.
|
||||
musl* | newlib* | uclibc*)
|
||||
;;
|
||||
# Likewise for "kernel-libc"
|
||||
eabi | eabihf | gnueabi | gnueabihf)
|
||||
# Likewise for "kernel-abi"
|
||||
eabi* | gnueabi*)
|
||||
;;
|
||||
# VxWorks passes extra cpu info in the 4th filed.
|
||||
simlinux | simwindows | spe)
|
||||
;;
|
||||
# Now accept the basic system types.
|
||||
# The portable systems comes first.
|
||||
|
@ -1750,6 +1754,8 @@ case $kernel-$os in
|
|||
;;
|
||||
kfreebsd*-gnu* | kopensolaris*-gnu*)
|
||||
;;
|
||||
vxworks-simlinux | vxworks-simwindows | vxworks-spe)
|
||||
;;
|
||||
nto-qnx*)
|
||||
;;
|
||||
os2-emx)
|
||||
|
|
84
configure.ac
84
configure.ac
|
@ -1,4 +1,4 @@
|
|||
AC_INIT(nix, m4_esyscmd([bash -c "echo -n $(cat ./.version)$VERSION_SUFFIX"]))
|
||||
AC_INIT([nix],[m4_esyscmd(bash -c "echo -n $(cat ./.version)$VERSION_SUFFIX")])
|
||||
AC_CONFIG_MACRO_DIRS([m4])
|
||||
AC_CONFIG_SRCDIR(README.md)
|
||||
AC_CONFIG_AUX_DIR(config)
|
||||
|
@ -9,8 +9,7 @@ AC_PROG_SED
|
|||
AC_CANONICAL_HOST
|
||||
AC_MSG_CHECKING([for the canonical Nix system name])
|
||||
|
||||
AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM],
|
||||
[Platform identifier (e.g., `i686-linux').]),
|
||||
AC_ARG_WITH(system, AS_HELP_STRING([--with-system=SYSTEM],[Platform identifier (e.g., `i686-linux').]),
|
||||
[system=$withval],
|
||||
[case "$host_cpu" in
|
||||
i*86)
|
||||
|
@ -33,14 +32,6 @@ AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM],
|
|||
system="$machine_name-`echo $host_os | "$SED" -e's/@<:@0-9.@:>@*$//g'`";;
|
||||
esac])
|
||||
|
||||
sys_name=$(uname -s | tr 'A-Z ' 'a-z_')
|
||||
|
||||
case $sys_name in
|
||||
cygwin*)
|
||||
sys_name=cygwin
|
||||
;;
|
||||
esac
|
||||
|
||||
AC_MSG_RESULT($system)
|
||||
AC_SUBST(system)
|
||||
AC_DEFINE_UNQUOTED(SYSTEM, ["$system"], [platform identifier ('cpu-os')])
|
||||
|
@ -64,10 +55,12 @@ AC_SYS_LARGEFILE
|
|||
|
||||
# Solaris-specific stuff.
|
||||
AC_STRUCT_DIRENT_D_TYPE
|
||||
if test "$sys_name" = sunos; then
|
||||
case "$host_os" in
|
||||
solaris*)
|
||||
# Solaris requires -lsocket -lnsl for network functions
|
||||
LIBS="-lsocket -lnsl $LIBS"
|
||||
fi
|
||||
LDFLAGS="-lsocket -lnsl $LDFLAGS"
|
||||
;;
|
||||
esac
|
||||
|
||||
|
||||
# Check for pubsetbuf.
|
||||
|
@ -127,8 +120,7 @@ NEED_PROG(jq, jq)
|
|||
AC_SUBST(coreutils, [$(dirname $(type -p cat))])
|
||||
|
||||
|
||||
AC_ARG_WITH(store-dir, AC_HELP_STRING([--with-store-dir=PATH],
|
||||
[path of the Nix store (defaults to /nix/store)]),
|
||||
AC_ARG_WITH(store-dir, AS_HELP_STRING([--with-store-dir=PATH],[path of the Nix store (defaults to /nix/store)]),
|
||||
storedir=$withval, storedir='/nix/store')
|
||||
AC_SUBST(storedir)
|
||||
|
||||
|
@ -152,13 +144,12 @@ int main() {
|
|||
}]])], GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=no, GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=yes)
|
||||
AC_MSG_RESULT($GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC)
|
||||
if test "x$GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC" = xyes; then
|
||||
LIBS="-latomic $LIBS"
|
||||
LDFLAGS="-latomic $LDFLAGS"
|
||||
fi
|
||||
|
||||
PKG_PROG_PKG_CONFIG
|
||||
|
||||
AC_ARG_ENABLE(shared, AC_HELP_STRING([--enable-shared],
|
||||
[Build shared libraries for Nix [default=yes]]),
|
||||
AC_ARG_ENABLE(shared, AS_HELP_STRING([--enable-shared],[Build shared libraries for Nix [default=yes]]),
|
||||
shared=$enableval, shared=yes)
|
||||
if test "$shared" = yes; then
|
||||
AC_SUBST(BUILD_SHARED_LIBS, 1, [Whether to build shared libraries.])
|
||||
|
@ -213,30 +204,32 @@ AC_SUBST(HAVE_LIBCPUID, [$have_libcpuid])
|
|||
|
||||
|
||||
# Look for libseccomp, required for Linux sandboxing.
|
||||
if test "$sys_name" = linux; then
|
||||
AC_ARG_ENABLE([seccomp-sandboxing],
|
||||
AC_HELP_STRING([--disable-seccomp-sandboxing],
|
||||
[Don't build support for seccomp sandboxing (only recommended if your arch doesn't support libseccomp yet!)]
|
||||
))
|
||||
if test "x$enable_seccomp_sandboxing" != "xno"; then
|
||||
PKG_CHECK_MODULES([LIBSECCOMP], [libseccomp],
|
||||
[CXXFLAGS="$LIBSECCOMP_CFLAGS $CXXFLAGS"])
|
||||
have_seccomp=1
|
||||
AC_DEFINE([HAVE_SECCOMP], [1], [Whether seccomp is available and should be used for sandboxing.])
|
||||
else
|
||||
case "$host_os" in
|
||||
linux*)
|
||||
AC_ARG_ENABLE([seccomp-sandboxing],
|
||||
AS_HELP_STRING([--disable-seccomp-sandboxing],[Don't build support for seccomp sandboxing (only recommended if your arch doesn't support libseccomp yet!)
|
||||
]))
|
||||
if test "x$enable_seccomp_sandboxing" != "xno"; then
|
||||
PKG_CHECK_MODULES([LIBSECCOMP], [libseccomp],
|
||||
[CXXFLAGS="$LIBSECCOMP_CFLAGS $CXXFLAGS"])
|
||||
have_seccomp=1
|
||||
AC_DEFINE([HAVE_SECCOMP], [1], [Whether seccomp is available and should be used for sandboxing.])
|
||||
else
|
||||
have_seccomp=
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
have_seccomp=
|
||||
fi
|
||||
else
|
||||
have_seccomp=
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
AC_SUBST(HAVE_SECCOMP, [$have_seccomp])
|
||||
|
||||
|
||||
# Look for aws-cpp-sdk-s3.
|
||||
AC_LANG_PUSH(C++)
|
||||
AC_CHECK_HEADERS([aws/s3/S3Client.h],
|
||||
[AC_DEFINE([ENABLE_S3], [1], [Whether to enable S3 support via aws-sdk-cpp.])
|
||||
enable_s3=1], [enable_s3=])
|
||||
[AC_DEFINE([ENABLE_S3], [1], [Whether to enable S3 support via aws-sdk-cpp.]) enable_s3=1],
|
||||
[AC_DEFINE([ENABLE_S3], [0], [Whether to enable S3 support via aws-sdk-cpp.]) enable_s3=])
|
||||
AC_SUBST(ENABLE_S3, [$enable_s3])
|
||||
AC_LANG_POP(C++)
|
||||
|
||||
|
@ -249,8 +242,7 @@ fi
|
|||
|
||||
|
||||
# Whether to use the Boehm garbage collector.
|
||||
AC_ARG_ENABLE(gc, AC_HELP_STRING([--enable-gc],
|
||||
[enable garbage collection in the Nix expression evaluator (requires Boehm GC) [default=yes]]),
|
||||
AC_ARG_ENABLE(gc, AS_HELP_STRING([--enable-gc],[enable garbage collection in the Nix expression evaluator (requires Boehm GC) [default=yes]]),
|
||||
gc=$enableval, gc=yes)
|
||||
if test "$gc" = yes; then
|
||||
PKG_CHECK_MODULES([BDW_GC], [bdw-gc])
|
||||
|
@ -264,11 +256,12 @@ PKG_CHECK_MODULES([GTEST], [gtest_main])
|
|||
|
||||
|
||||
# documentation generation switch
|
||||
AC_ARG_ENABLE(doc-gen, AC_HELP_STRING([--disable-doc-gen],
|
||||
[disable documentation generation]),
|
||||
AC_ARG_ENABLE(doc-gen, AS_HELP_STRING([--disable-doc-gen],[disable documentation generation]),
|
||||
doc_generate=$enableval, doc_generate=yes)
|
||||
AC_SUBST(doc_generate)
|
||||
|
||||
# Look for lowdown library.
|
||||
PKG_CHECK_MODULES([LOWDOWN], [lowdown >= 0.8.0], [CXXFLAGS="$LOWDOWN_CFLAGS $CXXFLAGS"])
|
||||
|
||||
# Setuid installations.
|
||||
AC_CHECK_FUNCS([setresuid setreuid lchown])
|
||||
|
@ -280,13 +273,14 @@ AC_CHECK_FUNCS([strsignal posix_fallocate sysconf])
|
|||
|
||||
# This is needed if bzip2 is a static library, and the Nix libraries
|
||||
# are dynamic.
|
||||
if test "$(uname)" = "Darwin"; then
|
||||
case "${host_os}" in
|
||||
darwin*)
|
||||
LDFLAGS="-all_load $LDFLAGS"
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
|
||||
AC_ARG_WITH(sandbox-shell, AC_HELP_STRING([--with-sandbox-shell=PATH],
|
||||
[path of a statically-linked shell to use as /bin/sh in sandboxes]),
|
||||
AC_ARG_WITH(sandbox-shell, AS_HELP_STRING([--with-sandbox-shell=PATH],[path of a statically-linked shell to use as /bin/sh in sandboxes]),
|
||||
sandbox_shell=$withval)
|
||||
AC_SUBST(sandbox_shell)
|
||||
|
||||
|
@ -301,6 +295,6 @@ done
|
|||
|
||||
rm -f Makefile.config
|
||||
|
||||
AC_CONFIG_HEADER([config.h])
|
||||
AC_CONFIG_HEADERS([config.h])
|
||||
AC_CONFIG_FILES([])
|
||||
AC_OUTPUT
|
||||
|
|
|
@ -6,9 +6,11 @@ builtins:
|
|||
concatStrings (map
|
||||
(name:
|
||||
let builtin = builtins.${name}; in
|
||||
" - `builtins.${name}` " + concatStringsSep " " (map (s: "*${s}*") builtin.args)
|
||||
+ " \n\n"
|
||||
+ concatStrings (map (s: " ${s}\n") (splitLines builtin.doc)) + "\n\n"
|
||||
"<dt><code>${name} "
|
||||
+ concatStringsSep " " (map (s: "<var>${s}</var>") builtin.args)
|
||||
+ "</code></dt>"
|
||||
+ "<dd>\n\n"
|
||||
+ builtin.doc
|
||||
+ "\n\n</dd>"
|
||||
)
|
||||
(attrNames builtins))
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
command:
|
||||
{ command, renderLinks ? false }:
|
||||
|
||||
with builtins;
|
||||
with import ./utils.nix;
|
||||
|
@ -20,7 +20,11 @@ let
|
|||
categories = sort (x: y: x.id < y.id) (unique (map (cmd: cmd.category) (attrValues def.commands)));
|
||||
listCommands = cmds:
|
||||
concatStrings (map (name:
|
||||
"* [`${command} ${name}`](./${appendName filename name}.md) - ${cmds.${name}.description}\n")
|
||||
"* "
|
||||
+ (if renderLinks
|
||||
then "[`${command} ${name}`](./${appendName filename name}.md)"
|
||||
else "`${command} ${name}`")
|
||||
+ " - ${cmds.${name}.description}\n")
|
||||
(attrNames cmds));
|
||||
in
|
||||
"where *subcommand* is one of the following:\n\n"
|
||||
|
@ -89,7 +93,7 @@ let
|
|||
in
|
||||
|
||||
let
|
||||
manpages = processCommand { filename = "nix"; command = "nix"; def = command; };
|
||||
manpages = processCommand { filename = "nix"; command = "nix"; def = builtins.fromJSON command; };
|
||||
summary = concatStrings (map (manpage: " - [${manpage.command}](command-ref/new-cli/${manpage.name})\n") manpages);
|
||||
in
|
||||
(listToAttrs manpages) // { "SUMMARY.md" = summary; }
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
ifeq ($(doc_generate),yes)
|
||||
|
||||
MANUAL_SRCS := $(call rwildcard, $(d)/src, *.md)
|
||||
|
||||
# Generate man pages.
|
||||
man-pages := $(foreach n, \
|
||||
nix-env.1 nix-build.1 nix-shell.1 nix-store.1 nix-instantiate.1 \
|
||||
|
@ -46,7 +44,7 @@ $(d)/src/SUMMARY.md: $(d)/src/SUMMARY.md.in $(d)/src/command-ref/new-cli
|
|||
|
||||
$(d)/src/command-ref/new-cli: $(d)/nix.json $(d)/generate-manpage.nix $(bindir)/nix
|
||||
@rm -rf $@
|
||||
$(trace-gen) $(nix-eval) --write-to $@ --expr 'import doc/manual/generate-manpage.nix (builtins.fromJSON (builtins.readFile $<))'
|
||||
$(trace-gen) $(nix-eval) --write-to $@ --expr 'import doc/manual/generate-manpage.nix { command = builtins.readFile $<; renderLinks = true; }'
|
||||
|
||||
$(d)/src/command-ref/conf-file.md: $(d)/conf-file.json $(d)/generate-options.nix $(d)/src/command-ref/conf-file-prefix.md $(bindir)/nix
|
||||
@cat doc/manual/src/command-ref/conf-file-prefix.md > $@.tmp
|
||||
|
@ -64,6 +62,7 @@ $(d)/conf-file.json: $(bindir)/nix
|
|||
$(d)/src/expressions/builtins.md: $(d)/builtins.json $(d)/generate-builtins.nix $(d)/src/expressions/builtins-prefix.md $(bindir)/nix
|
||||
@cat doc/manual/src/expressions/builtins-prefix.md > $@.tmp
|
||||
$(trace-gen) $(nix-eval) --expr 'import doc/manual/generate-builtins.nix (builtins.fromJSON (builtins.readFile $<))' >> $@.tmp
|
||||
@cat doc/manual/src/expressions/builtins-suffix.md >> $@.tmp
|
||||
@mv $@.tmp $@
|
||||
|
||||
$(d)/builtins.json: $(bindir)/nix
|
||||
|
@ -74,17 +73,28 @@ $(d)/builtins.json: $(bindir)/nix
|
|||
install: $(docdir)/manual/index.html
|
||||
|
||||
# Generate 'nix' manpages.
|
||||
install: $(d)/src/command-ref/new-cli
|
||||
install: $(mandir)/man1/nix3-manpages
|
||||
man: doc/manual/generated/man1/nix3-manpages
|
||||
all: doc/manual/generated/man1/nix3-manpages
|
||||
|
||||
$(mandir)/man1/nix3-manpages: doc/manual/generated/man1/nix3-manpages
|
||||
@mkdir -p $(DESTDIR)$$(dirname $@)
|
||||
$(trace-install) install -m 0644 $$(dirname $<)/* $(DESTDIR)$$(dirname $@)
|
||||
|
||||
doc/manual/generated/man1/nix3-manpages: $(d)/src/command-ref/new-cli
|
||||
@mkdir -p $(DESTDIR)$$(dirname $@)
|
||||
$(trace-gen) for i in doc/manual/src/command-ref/new-cli/*.md; do \
|
||||
name=$$(basename $$i .md); \
|
||||
tmpFile=$$(mktemp); \
|
||||
if [[ $$name = SUMMARY ]]; then continue; fi; \
|
||||
printf "Title: %s\n\n" "$$name" > $$i.tmp; \
|
||||
cat $$i >> $$i.tmp; \
|
||||
lowdown -sT man -M section=1 $$i.tmp -o $(mandir)/man1/$$name.1; \
|
||||
printf "Title: %s\n\n" "$$name" > $$tmpFile; \
|
||||
cat $$i >> $$tmpFile; \
|
||||
lowdown -sT man -M section=1 $$tmpFile -o $(DESTDIR)$$(dirname $@)/$$name.1; \
|
||||
rm $$tmpFile; \
|
||||
done
|
||||
@touch $@
|
||||
|
||||
$(docdir)/manual/index.html: $(MANUAL_SRCS) $(d)/book.toml $(d)/custom.css $(d)/src/SUMMARY.md $(d)/src/command-ref/new-cli $(d)/src/command-ref/conf-file.md $(d)/src/expressions/builtins.md
|
||||
$(trace-gen) RUST_LOG=warn mdbook build doc/manual -d $(docdir)/manual
|
||||
@cp doc/manual/highlight.pack.js $(docdir)/manual/highlight.js
|
||||
$(docdir)/manual/index.html: $(MANUAL_SRCS) $(d)/book.toml $(d)/custom.css $(d)/src/SUMMARY.md $(d)/src/command-ref/new-cli $(d)/src/command-ref/conf-file.md $(d)/src/expressions/builtins.md $(call rwildcard, $(d)/src, *.md)
|
||||
$(trace-gen) RUST_LOG=warn mdbook build doc/manual -d $(DESTDIR)$(docdir)/manual
|
||||
|
||||
endif
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
- [Prerequisites](installation/prerequisites-source.md)
|
||||
- [Obtaining a Source Distribution](installation/obtaining-source.md)
|
||||
- [Building Nix from Source](installation/building-source.md)
|
||||
- [Using Nix within Docker](installation/installing-docker.md)
|
||||
- [Security](installation/nix-security.md)
|
||||
- [Single-User Mode](installation/single-user.md)
|
||||
- [Multi-User Mode](installation/multi-user.md)
|
||||
|
@ -70,6 +71,8 @@
|
|||
- [Hacking](contributing/hacking.md)
|
||||
- [CLI guideline](contributing/cli-guideline.md)
|
||||
- [Release Notes](release-notes/release-notes.md)
|
||||
- [Release X.Y (202?-??-??)](release-notes/rl-next.md)
|
||||
- [Release 2.4 (2021-11-01)](release-notes/rl-2.4.md)
|
||||
- [Release 2.3 (2019-09-04)](release-notes/rl-2.3.md)
|
||||
- [Release 2.2 (2019-01-11)](release-notes/rl-2.2.md)
|
||||
- [Release 2.1 (2018-09-02)](release-notes/rl-2.1.md)
|
||||
|
|
|
@ -16,8 +16,9 @@ By default Nix reads settings from the following places:
|
|||
will be loaded in reverse order.
|
||||
|
||||
Otherwise it will look for `nix/nix.conf` files in `XDG_CONFIG_DIRS`
|
||||
and `XDG_CONFIG_HOME`. If these are unset, it will look in
|
||||
`$HOME/.config/nix.conf`.
|
||||
and `XDG_CONFIG_HOME`. If unset, `XDG_CONFIG_DIRS` defaults to
|
||||
`/etc/xdg`, and `XDG_CONFIG_HOME` defaults to `$HOME/.config`
|
||||
as per [XDG Base Directory Specification](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html).
|
||||
|
||||
- If `NIX_CONFIG` is set, its contents is treated as the contents of
|
||||
a configuration file.
|
||||
|
|
|
@ -10,35 +10,39 @@ Most Nix commands interpret the following environment variables:
|
|||
A colon-separated list of directories used to look up Nix
|
||||
expressions enclosed in angle brackets (i.e., `<path>`). For
|
||||
instance, the value
|
||||
|
||||
|
||||
/home/eelco/Dev:/etc/nixos
|
||||
|
||||
|
||||
will cause Nix to look for paths relative to `/home/eelco/Dev` and
|
||||
`/etc/nixos`, in this order. It is also possible to match paths
|
||||
against a prefix. For example, the value
|
||||
|
||||
|
||||
nixpkgs=/home/eelco/Dev/nixpkgs-branch:/etc/nixos
|
||||
|
||||
|
||||
will cause Nix to search for `<nixpkgs/path>` in
|
||||
`/home/eelco/Dev/nixpkgs-branch/path` and `/etc/nixos/nixpkgs/path`.
|
||||
|
||||
|
||||
If a path in the Nix search path starts with `http://` or
|
||||
`https://`, it is interpreted as the URL of a tarball that will be
|
||||
downloaded and unpacked to a temporary location. The tarball must
|
||||
consist of a single top-level directory. For example, setting
|
||||
`NIX_PATH` to
|
||||
|
||||
nixpkgs=https://github.com/NixOS/nixpkgs/archive/nixos-15.09.tar.gz
|
||||
|
||||
tells Nix to download the latest revision in the Nixpkgs/NixOS 15.09
|
||||
channel.
|
||||
|
||||
A following shorthand can be used to refer to the official channels:
|
||||
|
||||
nixpkgs=channel:nixos-15.09
|
||||
|
||||
The search path can be extended using the `-I` option, which takes
|
||||
precedence over `NIX_PATH`.
|
||||
|
||||
nixpkgs=https://github.com/NixOS/nixpkgs/archive/master.tar.gz
|
||||
|
||||
tells Nix to download and use the current contents of the
|
||||
`master` branch in the `nixpkgs` repository.
|
||||
|
||||
The URLs of the tarballs from the official nixos.org channels (see
|
||||
[the manual for `nix-channel`](nix-channel.md)) can be abbreviated
|
||||
as `channel:<channel-name>`. For instance, the following two
|
||||
values of `NIX_PATH` are equivalent:
|
||||
|
||||
nixpkgs=channel:nixos-21.05
|
||||
nixpkgs=https://nixos.org/channels/nixos-21.05/nixexprs.tar.xz
|
||||
|
||||
The Nix search path can also be extended using the `-I` option to
|
||||
many Nix commands, which takes precedence over `NIX_PATH`.
|
||||
|
||||
- `NIX_IGNORE_SYMLINK_STORE`\
|
||||
Normally, the Nix store directory (typically `/nix/store`) is not
|
||||
|
@ -50,7 +54,7 @@ Most Nix commands interpret the following environment variables:
|
|||
builds are deployed to machines where `/nix/store` resolves
|
||||
differently. If you are sure that you’re not going to do that, you
|
||||
can set `NIX_IGNORE_SYMLINK_STORE` to `1`.
|
||||
|
||||
|
||||
Note that if you’re symlinking the Nix store so that you can put it
|
||||
on another file system than the root file system, on Linux you’re
|
||||
better off using `bind` mount points, e.g.,
|
||||
|
@ -59,7 +63,7 @@ Most Nix commands interpret the following environment variables:
|
|||
$ mkdir /nix
|
||||
$ mount -o bind /mnt/otherdisk/nix /nix
|
||||
```
|
||||
|
||||
|
||||
Consult the mount 8 manual page for details.
|
||||
|
||||
- `NIX_STORE_DIR`\
|
||||
|
|
|
@ -238,7 +238,16 @@ a number of possible ways:
|
|||
|
||||
## Examples
|
||||
|
||||
To install a specific version of `gcc` from the active Nix expression:
|
||||
To install a package using a specific attribute path from the active Nix expression:
|
||||
|
||||
```console
|
||||
$ nix-env -iA gcc40mips
|
||||
installing `gcc-4.0.2'
|
||||
$ nix-env -iA xorg.xorgserver
|
||||
installing `xorg-server-1.2.0'
|
||||
```
|
||||
|
||||
To install a specific version of `gcc` using the derivation name:
|
||||
|
||||
```console
|
||||
$ nix-env --install gcc-3.3.2
|
||||
|
@ -246,6 +255,9 @@ installing `gcc-3.3.2'
|
|||
uninstalling `gcc-3.1'
|
||||
```
|
||||
|
||||
Using attribute path for selecting a package is preferred,
|
||||
as it is much faster and there will not be multiple matches.
|
||||
|
||||
Note the previously installed version is removed, since
|
||||
`--preserve-installed` was not specified.
|
||||
|
||||
|
@ -256,13 +268,6 @@ $ nix-env --install gcc
|
|||
installing `gcc-3.3.2'
|
||||
```
|
||||
|
||||
To install using a specific attribute:
|
||||
|
||||
```console
|
||||
$ nix-env -i -A gcc40mips
|
||||
$ nix-env -i -A xorg.xorgserver
|
||||
```
|
||||
|
||||
To install all derivations in the Nix expression `foo.nix`:
|
||||
|
||||
```console
|
||||
|
@ -374,22 +379,29 @@ For the other flags, see `--install`.
|
|||
## Examples
|
||||
|
||||
```console
|
||||
$ nix-env --upgrade gcc
|
||||
$ nix-env --upgrade -A nixpkgs.gcc
|
||||
upgrading `gcc-3.3.1' to `gcc-3.4'
|
||||
```
|
||||
|
||||
When there are no updates available, nothing will happen:
|
||||
|
||||
```console
|
||||
$ nix-env -u gcc-3.3.2 --always (switch to a specific version)
|
||||
$ nix-env --upgrade -A nixpkgs.pan
|
||||
```
|
||||
|
||||
Using `-A` is preferred when possible, as it is faster and unambiguous but
|
||||
it is also possible to upgrade to a specific version by matching the derivation name:
|
||||
|
||||
```console
|
||||
$ nix-env -u gcc-3.3.2 --always
|
||||
upgrading `gcc-3.4' to `gcc-3.3.2'
|
||||
```
|
||||
|
||||
```console
|
||||
$ nix-env --upgrade pan
|
||||
(no upgrades available, so nothing happens)
|
||||
```
|
||||
To try to upgrade everything
|
||||
(matching packages based on the part of the derivation name without version):
|
||||
|
||||
```console
|
||||
$ nix-env -u (try to upgrade everything)
|
||||
$ nix-env -u
|
||||
upgrading `hello-2.1.2' to `hello-2.1.3'
|
||||
upgrading `mozilla-1.2' to `mozilla-1.4'
|
||||
```
|
||||
|
@ -401,7 +413,7 @@ of a derivation `x` by looking at their respective `name` attributes.
|
|||
The names (e.g., `gcc-3.3.1` are split into two parts: the package name
|
||||
(`gcc`), and the version (`3.3.1`). The version part starts after the
|
||||
first dash not followed by a letter. `x` is considered an upgrade of `y`
|
||||
if their package names match, and the version of `y` is higher that that
|
||||
if their package names match, and the version of `y` is higher than that
|
||||
of `x`.
|
||||
|
||||
The versions are compared by splitting them into contiguous components
|
||||
|
|
|
@ -11,8 +11,8 @@
|
|||
[`--command` *cmd*]
|
||||
[`--run` *cmd*]
|
||||
[`--exclude` *regexp*]
|
||||
[--pure]
|
||||
[--keep *name*]
|
||||
[`--pure`]
|
||||
[`--keep` *name*]
|
||||
{{`--packages` | `-p`} {*packages* | *expressions*} … | [*path*]}
|
||||
|
||||
# Description
|
||||
|
@ -78,9 +78,7 @@ All options not listed here are passed to `nix-store
|
|||
cleared before the interactive shell is started, so you get an
|
||||
environment that more closely corresponds to the “real” Nix build. A
|
||||
few variables, in particular `HOME`, `USER` and `DISPLAY`, are
|
||||
retained. Note that (depending on your Bash
|
||||
installation) `/etc/bashrc` is still sourced, so any variables set
|
||||
there will affect the interactive shell.
|
||||
retained.
|
||||
|
||||
- `--packages` / `-p` *packages*…\
|
||||
Set up an environment in which the specified packages are present.
|
||||
|
@ -112,13 +110,19 @@ shell in which to build it:
|
|||
|
||||
```console
|
||||
$ nix-shell '<nixpkgs>' -A pan
|
||||
[nix-shell]$ unpackPhase
|
||||
[nix-shell]$ eval ${unpackPhase:-unpackPhase}
|
||||
[nix-shell]$ cd pan-*
|
||||
[nix-shell]$ configurePhase
|
||||
[nix-shell]$ buildPhase
|
||||
[nix-shell]$ eval ${configurePhase:-configurePhase}
|
||||
[nix-shell]$ eval ${buildPhase:-buildPhase}
|
||||
[nix-shell]$ ./pan/gui/pan
|
||||
```
|
||||
|
||||
The reason we use form `eval ${configurePhase:-configurePhase}` here is because
|
||||
those packages that override these phases do so by exporting the overridden
|
||||
values in the environment variable of the same name.
|
||||
Here bash is being told to either evaluate the contents of 'configurePhase',
|
||||
if it exists as a variable, otherwise evaluate the configurePhase function.
|
||||
|
||||
To clear the environment first, and do some additional automatic
|
||||
initialisation of the interactive shell:
|
||||
|
||||
|
|
|
@ -125,7 +125,7 @@ Special exit codes:
|
|||
|
||||
- `104`\
|
||||
Not deterministic, the build succeeded in check mode but the
|
||||
resulting output is not binary reproducable.
|
||||
resulting output is not binary reproducible.
|
||||
|
||||
With the `--keep-going` flag it's possible for multiple failures to
|
||||
occur, in this case the 1xx status codes are or combined using binary
|
||||
|
|
|
@ -162,11 +162,11 @@ Most Nix commands accept the following command-line options:
|
|||
}: ...
|
||||
```
|
||||
|
||||
So if you call this Nix expression (e.g., when you do `nix-env -i
|
||||
So if you call this Nix expression (e.g., when you do `nix-env -iA
|
||||
pkgname`), the function will be called automatically using the
|
||||
value [`builtins.currentSystem`](../expressions/builtins.md) for
|
||||
the `system` argument. You can override this using `--arg`, e.g.,
|
||||
`nix-env -i pkgname --arg system \"i686-freebsd\"`. (Note that
|
||||
`nix-env -iA pkgname --arg system \"i686-freebsd\"`. (Note that
|
||||
since the argument is a Nix string literal, you have to escape the
|
||||
quotes.)
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
## Goals
|
||||
|
||||
Purpose of this document is to provide a clear direction to **help design
|
||||
delightful command line** experience. This document contain guidelines to
|
||||
delightful command line** experience. This document contains guidelines to
|
||||
follow to ensure a consistent and approachable user experience.
|
||||
|
||||
## Overview
|
||||
|
@ -103,7 +103,7 @@ impacted the most by bad user experience.
|
|||
# Help is essential
|
||||
|
||||
Help should be built into your command line so that new users can gradually
|
||||
discover new features when they need them.
|
||||
discover new features when they need them.
|
||||
|
||||
## Looking for help
|
||||
|
||||
|
@ -115,7 +115,7 @@ The rules are:
|
|||
|
||||
- Help is shown by using `--help` or `help` command (eg `nix` `--``help` or
|
||||
`nix help`).
|
||||
- For non-COMMANDs (eg. `nix` `--``help` and `nix store` `--``help`) we **show
|
||||
- For non-COMMANDs (eg. `nix` `--``help` and `nix store` `--``help`) we **show
|
||||
a summary** of most common use cases. Summary is presented on the STDOUT
|
||||
without any use of PAGER.
|
||||
- For COMMANDs (eg. `nix init` `--``help` or `nix help init`) we display the
|
||||
|
@ -176,7 +176,7 @@ $ nix init --template=template#pyton
|
|||
------------------------------------------------------------------------
|
||||
Initializing Nix project at `/path/to/here`.
|
||||
Select a template for you new project:
|
||||
|> template#pyton
|
||||
|> template#python
|
||||
template#python-pip
|
||||
template#python-poetry
|
||||
```
|
||||
|
@ -230,17 +230,17 @@ Now **Learn** part of the output is where you educate users. You should only
|
|||
show it when you know that a build will take some time and not annoy users of
|
||||
the builds that take only few seconds.
|
||||
|
||||
Every feature like this should go though a intensive review and testing to
|
||||
collect as much a feedback as possible and to fine tune every little detail. If
|
||||
Every feature like this should go through an intensive review and testing to
|
||||
collect as much feedback as possible and to fine tune every little detail. If
|
||||
done right this can be an awesome features beginners and advance users will
|
||||
love, but if not done perfectly it will annoy users and leave bad impression.
|
||||
|
||||
# Input
|
||||
|
||||
Input to a command is provided via `ARGUMENTS` and `OPTIONS`.
|
||||
Input to a command is provided via `ARGUMENTS` and `OPTIONS`.
|
||||
|
||||
`ARGUMENTS` represent a required input for a function. When choosing to use
|
||||
`ARGUMENT` over function please be aware of the downsides that come with it:
|
||||
`ARGUMENTS` over `OPTIONS` please be aware of the downsides that come with it:
|
||||
|
||||
- User will need to remember the order of `ARGUMENTS`. This is not a problem if
|
||||
there is only one `ARGUMENT`.
|
||||
|
@ -253,7 +253,7 @@ developer consider the downsides and choose wisely.
|
|||
|
||||
## Naming the `OPTIONS`
|
||||
|
||||
Then only naming convention - apart from the ones mentioned in Naming the
|
||||
The only naming convention - apart from the ones mentioned in Naming the
|
||||
`COMMANDS` section is how flags are named.
|
||||
|
||||
Flags are a type of `OPTION` that represent an option that can be turned ON of
|
||||
|
@ -271,12 +271,12 @@ to improve the discoverability of possible input. A new user will most likely
|
|||
not know which `ARGUMENTS` and `OPTIONS` are required or which values are
|
||||
possible for those options.
|
||||
|
||||
In cases, the user might not provide the input or they provide wrong input,
|
||||
rather then show the error, prompt a user with an option to find and select
|
||||
In case the user does not provide the input or they provide wrong input,
|
||||
rather than show the error, prompt a user with an option to find and select
|
||||
correct input (see examples).
|
||||
|
||||
Prompting is of course not required when TTY is not attached to STDIN. This
|
||||
would mean that scripts wont need to handle prompt, but rather handle errors.
|
||||
would mean that scripts won't need to handle prompt, but rather handle errors.
|
||||
|
||||
A place to use prompt and provide user with interactive select
|
||||
|
||||
|
@ -300,9 +300,9 @@ going to happen.
|
|||
```shell
|
||||
$ nix build --option substitutors https://cache.example.org
|
||||
------------------------------------------------------------------------
|
||||
Warning! A security related question need to be answered.
|
||||
Warning! A security related question needs to be answered.
|
||||
------------------------------------------------------------------------
|
||||
The following substitutors will be used to in `my-project`:
|
||||
The following substitutors will be used to in `my-project`:
|
||||
- https://cache.example.org
|
||||
|
||||
Do you allow `my-project` to use above mentioned substitutors?
|
||||
|
@ -311,14 +311,14 @@ $ nix build --option substitutors https://cache.example.org
|
|||
|
||||
# Output
|
||||
|
||||
Terminal output can be quite limiting in many ways. Which should forces us to
|
||||
Terminal output can be quite limiting in many ways. Which should force us to
|
||||
think about the experience even more. As with every design the output is a
|
||||
compromise between being terse and being verbose, between showing help to
|
||||
beginners and annoying advance users. For this it is important that we know
|
||||
what are the priorities.
|
||||
|
||||
Nix command line should be first and foremost written with beginners in mind.
|
||||
But users wont stay beginners for long and what was once useful might quickly
|
||||
But users won't stay beginners for long and what was once useful might quickly
|
||||
become annoying. There is no golden rule that we can give in this guideline
|
||||
that would make it easier how to draw a line and find best compromise.
|
||||
|
||||
|
@ -342,7 +342,7 @@ also allowing them to redirect content to a file. For example:
|
|||
```shell
|
||||
$ nix build > build.txt
|
||||
------------------------------------------------------------------------
|
||||
Error! Atrribute `bin` missing at (1:94) from string.
|
||||
Error! Attribute `bin` missing at (1:94) from string.
|
||||
------------------------------------------------------------------------
|
||||
|
||||
1| with import <nixpkgs> { }; (pkgs.runCommandCC or pkgs.runCommand) "shell" { buildInputs = [ (surge.bin) ]; } ""
|
||||
|
@ -408,7 +408,7 @@ Above command clearly states that command successfully completed. And in case
|
|||
of `nix build`, which is a command that might take some time to complete, it is
|
||||
equally important to also show that a command started.
|
||||
|
||||
## Text alignment
|
||||
## Text alignment
|
||||
|
||||
Text alignment is the number one design element that will present all of the
|
||||
Nix commands as a family and not as separate tools glued together.
|
||||
|
@ -419,7 +419,7 @@ The format we should follow is:
|
|||
$ nix COMMAND
|
||||
VERB_1 NOUN and other words
|
||||
VERB__1 NOUN and other words
|
||||
|> Some details
|
||||
|> Some details
|
||||
```
|
||||
|
||||
Few rules that we can extract from above example:
|
||||
|
@ -444,13 +444,13 @@ is not even notable, therefore relying on it wouldn’t make much sense.
|
|||
|
||||
**The bright text is much better supported** across terminals and color
|
||||
schemes. Most of the time the difference is perceived as if the bright text
|
||||
would be bold.
|
||||
would be bold.
|
||||
|
||||
## Colors
|
||||
|
||||
Humans are already conditioned by society to attach certain meaning to certain
|
||||
colors. While the meaning is not universal, a simple collection of colors is
|
||||
used to represent basic emotions.
|
||||
used to represent basic emotions.
|
||||
|
||||
Colors that can be used in output
|
||||
|
||||
|
@ -508,7 +508,7 @@ can, with a few key strokes, be changed into and advance introspection tool.
|
|||
|
||||
### Progress
|
||||
|
||||
For longer running commands we should provide and overview of the progress.
|
||||
For longer running commands we should provide and overview the progress.
|
||||
This is shown best in `nix build` example:
|
||||
|
||||
```shell
|
||||
|
@ -553,9 +553,9 @@ going to happen.
|
|||
```shell
|
||||
$ nix build --option substitutors https://cache.example.org
|
||||
------------------------------------------------------------------------
|
||||
Warning! A security related question need to be answered.
|
||||
Warning! A security related question needs to be answered.
|
||||
------------------------------------------------------------------------
|
||||
The following substitutors will be used to in `my-project`:
|
||||
The following substitutors will be used to in `my-project`:
|
||||
- https://cache.example.org
|
||||
|
||||
Do you allow `my-project` to use above mentioned substitutors?
|
||||
|
@ -566,7 +566,7 @@ $ nix build --option substitutors https://cache.example.org
|
|||
|
||||
There are many ways that you can control verbosity.
|
||||
|
||||
Verbosity levels are:
|
||||
Verbosity levels are:
|
||||
|
||||
- `ERROR` (level 0)
|
||||
- `WARN` (level 1)
|
||||
|
@ -586,4 +586,4 @@ There are also two shortcuts, `--debug` to run in `DEBUG` verbosity level and
|
|||
|
||||
# Appendix 1: Commands naming exceptions
|
||||
|
||||
`nix init` and `nix repl` are well established
|
||||
`nix init` and `nix repl` are well established
|
||||
|
|
|
@ -237,7 +237,7 @@ Derivations can declare some infrequently used optional attributes.
|
|||
- `preferLocalBuild`\
|
||||
If this attribute is set to `true` and [distributed building is
|
||||
enabled](../advanced-topics/distributed-builds.md), then, if
|
||||
possible, the derivaton will be built locally instead of forwarded
|
||||
possible, the derivation will be built locally instead of forwarded
|
||||
to a remote machine. This is appropriate for trivial builders
|
||||
where the cost of doing a download or remote build would exceed
|
||||
the cost of building locally.
|
||||
|
|
|
@ -9,7 +9,8 @@ scope. Instead, you can access them through the `builtins` built-in
|
|||
value, which is a set that contains all built-in functions and values.
|
||||
For instance, `derivation` is also available as `builtins.derivation`.
|
||||
|
||||
- `derivation` *attrs*; `builtins.derivation` *attrs*\
|
||||
|
||||
`derivation` is described in [its own section](derivations.md).
|
||||
|
||||
<dl>
|
||||
<dt><code>derivation <var>attrs</var></code>;
|
||||
<code>builtins.derivation <var>attrs</var></code></dt>
|
||||
<dd><p><var>derivation</var> is described in
|
||||
<a href="derivations.md">its own section</a>.</p></dd>
|
||||
|
|
1
doc/manual/src/expressions/builtins-suffix.md
Normal file
1
doc/manual/src/expressions/builtins-suffix.md
Normal file
|
@ -0,0 +1 @@
|
|||
</dl>
|
|
@ -26,7 +26,7 @@ elements (referenced from the figure by number):
|
|||
called with three arguments: `stdenv`, `fetchurl`, and `perl`. They
|
||||
are needed to build Hello, but we don't know how to build them here;
|
||||
that's why they are function arguments. `stdenv` is a package that
|
||||
is used by almost all Nix Packages packages; it provides a
|
||||
is used by almost all Nix Packages; it provides a
|
||||
“standard” environment consisting of the things you would expect
|
||||
in a basic Unix environment: a C/C++ compiler (GCC, to be precise),
|
||||
the Bash shell, fundamental Unix tools such as `cp`, `grep`, `tar`,
|
||||
|
|
|
@ -17,12 +17,12 @@ order of precedence (from strongest to weakest binding).
|
|||
| String Concatenation | *string1* `+` *string2* | left | String concatenation. | 7 |
|
||||
| Not | `!` *e* | none | Boolean negation. | 8 |
|
||||
| Update | *e1* `//` *e2* | right | Return a set consisting of the attributes in *e1* and *e2* (with the latter taking precedence over the former in case of equally named attributes). | 9 |
|
||||
| Less Than | *e1* `<` *e2*, | none | Arithmetic comparison. | 10 |
|
||||
| Less Than or Equal To | *e1* `<=` *e2* | none | Arithmetic comparison. | 10 |
|
||||
| Greater Than | *e1* `>` *e2* | none | Arithmetic comparison. | 10 |
|
||||
| Greater Than or Equal To | *e1* `>=` *e2* | none | Arithmetic comparison. | 10 |
|
||||
| Less Than | *e1* `<` *e2*, | none | Arithmetic/lexicographic comparison. | 10 |
|
||||
| Less Than or Equal To | *e1* `<=` *e2* | none | Arithmetic/lexicographic comparison. | 10 |
|
||||
| Greater Than | *e1* `>` *e2* | none | Arithmetic/lexicographic comparison. | 10 |
|
||||
| Greater Than or Equal To | *e1* `>=` *e2* | none | Arithmetic/lexicographic comparison. | 10 |
|
||||
| Equality | *e1* `==` *e2* | none | Equality. | 11 |
|
||||
| Inequality | *e1* `!=` *e2* | none | Inequality. | 11 |
|
||||
| Logical AND | *e1* `&&` *e2* | left | Logical AND. | 12 |
|
||||
| Logical OR | *e1* `\|\|` *e2* | left | Logical OR. | 13 |
|
||||
| Logical Implication | *e1* `->` *e2* | none | Logical implication (equivalent to `!e1 \|\| e2`). | 14 |
|
||||
| Logical OR | *e1* <code>||</code> *e2* | left | Logical OR. | 13 |
|
||||
| Logical Implication | *e1* `->` *e2* | none | Logical implication (equivalent to <code>!e1 || e2</code>). | 14 |
|
||||
|
|
|
@ -64,7 +64,7 @@ Nix has the following basic data types:
|
|||
the start of each line. To be precise, it strips from each line a
|
||||
number of spaces equal to the minimal indentation of the string as a
|
||||
whole (disregarding the indentation of empty lines). For instance,
|
||||
the first and second line are indented two space, while the third
|
||||
the first and second line are indented two spaces, while the third
|
||||
line is indented four spaces. Thus, two spaces are stripped from
|
||||
each line, so the resulting string is
|
||||
|
||||
|
@ -139,6 +139,13 @@ Nix has the following basic data types:
|
|||
environment variable `NIX_PATH` will be searched for the given file
|
||||
or directory name.
|
||||
|
||||
Antiquotation is supported in any paths except those in angle brackets.
|
||||
`./${foo}-${bar}.nix` is a more convenient way of writing
|
||||
`./. + "/" + foo + "-" + bar + ".nix"` or `./. + "/${foo}-${bar}.nix"`. At
|
||||
least one slash must appear *before* any antiquotations for this to be
|
||||
recognized as a path. `a.${foo}/b.${bar}` is a syntactically valid division
|
||||
operation. `./a.${foo}/b.${bar}` is a path.
|
||||
|
||||
- *Booleans* with values `true` and `false`.
|
||||
|
||||
- The null value, denoted as `null`.
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
# Building and Testing
|
||||
|
||||
You can now try to build Hello. Of course, you could do `nix-env -i
|
||||
You can now try to build Hello. Of course, you could do `nix-env -f . -iA
|
||||
hello`, but you may not want to install a possibly broken package just
|
||||
yet. The best way to test the package is by using the command
|
||||
`nix-build`, which builds a Nix expression and creates a symlink named
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
# Building Nix from Source
|
||||
|
||||
After unpacking or checking out the Nix sources, issue the following
|
||||
commands:
|
||||
After cloning Nix's Git repository, issue the following commands:
|
||||
|
||||
```console
|
||||
$ ./bootstrap.sh
|
||||
$ ./configure options...
|
||||
$ make
|
||||
$ make install
|
||||
|
@ -11,13 +11,6 @@ $ make install
|
|||
|
||||
Nix requires GNU Make so you may need to invoke `gmake` instead.
|
||||
|
||||
When building from the Git repository, these should be preceded by the
|
||||
command:
|
||||
|
||||
```console
|
||||
$ ./bootstrap.sh
|
||||
```
|
||||
|
||||
The installation path can be specified by passing the `--prefix=prefix`
|
||||
to `configure`. The default installation directory is `/usr/local`. You
|
||||
can change this to any location you like. You must have write permission
|
||||
|
|
|
@ -40,7 +40,7 @@ export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt
|
|||
> **Note**
|
||||
>
|
||||
> You must not add the export and then do the install, as the Nix
|
||||
> installer will detect the presense of Nix configuration, and abort.
|
||||
> installer will detect the presence of Nix configuration, and abort.
|
||||
|
||||
## `NIX_SSL_CERT_FILE` with macOS and the Nix daemon
|
||||
|
||||
|
|
|
@ -1,18 +1,26 @@
|
|||
# Installing a Binary Distribution
|
||||
|
||||
If you are using Linux or macOS versions up to 10.14 (Mojave), the
|
||||
easiest way to install Nix is to run the following command:
|
||||
The easiest way to install Nix is to run the following command:
|
||||
|
||||
```console
|
||||
$ sh <(curl -L https://nixos.org/nix/install)
|
||||
```
|
||||
|
||||
If you're using macOS 10.15 (Catalina) or newer, consult [the macOS
|
||||
installation instructions](#macos-installation) before installing.
|
||||
This will run the installer interactively (causing it to explain what
|
||||
it is doing more explicitly), and perform the default "type" of install
|
||||
for your platform:
|
||||
- single-user on Linux
|
||||
- multi-user on macOS
|
||||
|
||||
As of Nix 2.1.0, the Nix installer will always default to creating a
|
||||
single-user installation, however opting in to the multi-user
|
||||
installation is highly recommended.
|
||||
> **Notes on read-only filesystem root in macOS 10.15 Catalina +**
|
||||
>
|
||||
> - It took some time to support this cleanly. You may see posts,
|
||||
> examples, and tutorials using obsolete workarounds.
|
||||
> - Supporting it cleanly made macOS installs too complex to qualify
|
||||
> as single-user, so this type is no longer supported on macOS.
|
||||
|
||||
We recommend the multi-user install if it supports your platform and
|
||||
you can authenticate with `sudo`.
|
||||
|
||||
# Single User Installation
|
||||
|
||||
|
@ -50,9 +58,9 @@ $ rm -rf /nix
|
|||
The multi-user Nix installation creates system users, and a system
|
||||
service for the Nix daemon.
|
||||
|
||||
- Linux running systemd, with SELinux disabled
|
||||
|
||||
- macOS
|
||||
**Supported Systems**
|
||||
- Linux running systemd, with SELinux disabled
|
||||
- macOS
|
||||
|
||||
You can instruct the installer to perform a multi-user installation on
|
||||
your system:
|
||||
|
@ -96,165 +104,28 @@ sudo rm /Library/LaunchDaemons/org.nixos.nix-daemon.plist
|
|||
There may also be references to Nix in `/etc/profile`, `/etc/bashrc`,
|
||||
and `/etc/zshrc` which you may remove.
|
||||
|
||||
# macOS Installation
|
||||
# macOS Installation <a name="sect-macos-installation-change-store-prefix"></a><a name="sect-macos-installation-encrypted-volume"></a><a name="sect-macos-installation-symlink"></a><a name="sect-macos-installation-recommended-notes"></a>
|
||||
<!-- Note: anchors above to catch permalinks to old explanations -->
|
||||
|
||||
Starting with macOS 10.15 (Catalina), the root filesystem is read-only.
|
||||
This means `/nix` can no longer live on your system volume, and that
|
||||
you'll need a workaround to install Nix.
|
||||
We believe we have ironed out how to cleanly support the read-only root
|
||||
on modern macOS. New installs will do this automatically, and you can
|
||||
also re-run a new installer to convert your existing setup.
|
||||
|
||||
The recommended approach, which creates an unencrypted APFS volume for
|
||||
your Nix store and a "synthetic" empty directory to mount it over at
|
||||
`/nix`, is least likely to impair Nix or your system.
|
||||
This section previously detailed the situation, options, and trade-offs,
|
||||
but it now only outlines what the installer does. You don't need to know
|
||||
this to run the installer, but it may help if you run into trouble:
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> With all separate-volume approaches, it's possible something on your
|
||||
> system (particularly daemons/services and restored apps) may need
|
||||
> access to your Nix store before the volume is mounted. Adding
|
||||
> additional encryption makes this more likely.
|
||||
|
||||
If you're using a recent Mac with a [T2
|
||||
chip](https://www.apple.com/euro/mac/shared/docs/Apple_T2_Security_Chip_Overview.pdf),
|
||||
your drive will still be encrypted at rest (in which case "unencrypted"
|
||||
is a bit of a misnomer). To use this approach, just install Nix with:
|
||||
|
||||
```console
|
||||
$ sh <(curl -L https://nixos.org/nix/install) --darwin-use-unencrypted-nix-store-volume
|
||||
```
|
||||
|
||||
If you don't like the sound of this, you'll want to weigh the other
|
||||
approaches and tradeoffs detailed in this section.
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> All of the known workarounds have drawbacks, but we hope better
|
||||
> solutions will be available in the future. Some that we have our eye
|
||||
> on are:
|
||||
>
|
||||
> 1. A true firmlink would enable the Nix store to live on the primary
|
||||
> data volume without the build problems caused by the symlink
|
||||
> approach. End users cannot currently create true firmlinks.
|
||||
>
|
||||
> 2. If the Nix store volume shared FileVault encryption with the
|
||||
> primary data volume (probably by using the same volume group and
|
||||
> role), FileVault encryption could be easily supported by the
|
||||
> installer without requiring manual setup by each user.
|
||||
|
||||
## Change the Nix store path prefix
|
||||
|
||||
Changing the default prefix for the Nix store is a simple approach which
|
||||
enables you to leave it on your root volume, where it can take full
|
||||
advantage of FileVault encryption if enabled. Unfortunately, this
|
||||
approach also opts your device out of some benefits that are enabled by
|
||||
using the same prefix across systems:
|
||||
|
||||
- Your system won't be able to take advantage of the binary cache
|
||||
(unless someone is able to stand up and support duplicate caching
|
||||
infrastructure), which means you'll spend more time waiting for
|
||||
builds.
|
||||
|
||||
- It's harder to build and deploy packages to Linux systems.
|
||||
|
||||
It would also be possible (and often requested) to just apply this change
|
||||
ecosystem-wide, but it's an intrusive process that has side effects we
|
||||
want to avoid for now.
|
||||
|
||||
## Use a separate encrypted volume
|
||||
|
||||
If you like, you can also add encryption to the recommended approach
|
||||
taken by the installer. You can do this by pre-creating an encrypted
|
||||
volume before you run the installer--or you can run the installer and
|
||||
encrypt the volume it creates later.
|
||||
|
||||
In either case, adding encryption to a second volume isn't quite as
|
||||
simple as enabling FileVault for your boot volume. Before you dive in,
|
||||
there are a few things to weigh:
|
||||
|
||||
1. The additional volume won't be encrypted with your existing
|
||||
FileVault key, so you'll need another mechanism to decrypt the
|
||||
volume.
|
||||
|
||||
2. You can store the password in Keychain to automatically decrypt the
|
||||
volume on boot--but it'll have to wait on Keychain and may not mount
|
||||
before your GUI apps restore. If any of your launchd agents or apps
|
||||
depend on Nix-installed software (for example, if you use a
|
||||
Nix-installed login shell), the restore may fail or break.
|
||||
|
||||
On a case-by-case basis, you may be able to work around this problem
|
||||
by using `wait4path` to block execution until your executable is
|
||||
available.
|
||||
|
||||
It's also possible to decrypt and mount the volume earlier with a
|
||||
login hook--but this mechanism appears to be deprecated and its
|
||||
future is unclear.
|
||||
|
||||
3. You can hard-code the password in the clear, so that your store
|
||||
volume can be decrypted before Keychain is available.
|
||||
|
||||
If you are comfortable navigating these tradeoffs, you can encrypt the
|
||||
volume with something along the lines of:
|
||||
|
||||
```console
|
||||
$ diskutil apfs enableFileVault /nix -user disk
|
||||
```
|
||||
|
||||
## Symlink the Nix store to a custom location
|
||||
|
||||
Another simple approach is using `/etc/synthetic.conf` to symlink the
|
||||
Nix store to the data volume. This option also enables your store to
|
||||
share any configured FileVault encryption. Unfortunately, builds that
|
||||
resolve the symlink may leak the canonical path or even fail.
|
||||
|
||||
Because of these downsides, we can't recommend this approach.
|
||||
|
||||
## Notes on the recommended approach
|
||||
|
||||
This section goes into a little more detail on the recommended approach.
|
||||
You don't need to understand it to run the installer, but it can serve
|
||||
as a helpful reference if you run into trouble.
|
||||
|
||||
1. In order to compose user-writable locations into the new read-only
|
||||
system root, Apple introduced a new concept called `firmlinks`,
|
||||
which it describes as a "bi-directional wormhole" between two
|
||||
filesystems. You can see the current firmlinks in
|
||||
`/usr/share/firmlinks`. Unfortunately, firmlinks aren't (currently?)
|
||||
user-configurable.
|
||||
|
||||
For special cases like NFS mount points or package manager roots,
|
||||
[synthetic.conf(5)](https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man5/synthetic.conf.5.html)
|
||||
supports limited user-controlled file-creation (of symlinks, and
|
||||
synthetic empty directories) at `/`. To create a synthetic empty
|
||||
directory for mounting at `/nix`, add the following line to
|
||||
`/etc/synthetic.conf` (create it if necessary):
|
||||
|
||||
nix
|
||||
|
||||
2. This configuration is applied at boot time, but you can use
|
||||
`apfs.util` to trigger creation (not deletion) of new entries
|
||||
without a reboot:
|
||||
|
||||
```console
|
||||
$ /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -B
|
||||
```
|
||||
|
||||
3. Create the new APFS volume with diskutil:
|
||||
|
||||
```console
|
||||
$ sudo diskutil apfs addVolume diskX APFS 'Nix Store' -mountpoint /nix
|
||||
```
|
||||
|
||||
4. Using `vifs`, add the new mount to `/etc/fstab`. If it doesn't
|
||||
already have other entries, it should look something like:
|
||||
|
||||
#
|
||||
# Warning - this file should only be modified with vifs(8)
|
||||
#
|
||||
# Failure to do so is unsupported and may be destructive.
|
||||
#
|
||||
LABEL=Nix\040Store /nix apfs rw,nobrowse
|
||||
|
||||
The nobrowse setting will keep Spotlight from indexing this volume,
|
||||
and keep it from showing up on your desktop.
|
||||
- create a new APFS volume for your Nix store
|
||||
- update `/etc/synthetic.conf` to direct macOS to create a "synthetic"
|
||||
empty root directory to mount your volume
|
||||
- specify mount options for the volume in `/etc/fstab`
|
||||
- if you have FileVault enabled
|
||||
- generate an encryption password
|
||||
- put it in your system Keychain
|
||||
- use it to encrypt the volume
|
||||
- create a system LaunchDaemon to mount this volume early enough in the
|
||||
boot process to avoid problems loading or restoring any programs that
|
||||
need access to your Nix store
|
||||
|
||||
# Installing a pinned Nix version from a URL
|
||||
|
||||
|
|
59
doc/manual/src/installation/installing-docker.md
Normal file
59
doc/manual/src/installation/installing-docker.md
Normal file
|
@ -0,0 +1,59 @@
|
|||
# Using Nix within Docker
|
||||
|
||||
To run the latest stable release of Nix with Docker, run the following command:
|
||||
|
||||
```console
|
||||
$ docker run -ti nixos/nix
|
||||
Unable to find image 'nixos/nix:latest' locally
|
||||
latest: Pulling from nixos/nix
|
||||
5843afab3874: Pull complete
|
||||
b52bf13f109c: Pull complete
|
||||
1e2415612aa3: Pull complete
|
||||
Digest: sha256:27f6e7f60227e959ee7ece361f75d4844a40e1cc6878b6868fe30140420031ff
|
||||
Status: Downloaded newer image for nixos/nix:latest
|
||||
35ca4ada6e96:/# nix --version
|
||||
nix (Nix) 2.3.12
|
||||
35ca4ada6e96:/# exit
|
||||
```
|
||||
|
||||
# What is included in Nix's Docker image?
|
||||
|
||||
The official Docker image is created using `pkgs.dockerTools.buildLayeredImage`
|
||||
(and not with a `Dockerfile`, as is usual with Docker images). You can still
|
||||
base your custom Docker image on it as you would with any other Docker
|
||||
image.
|
||||
|
||||
The Docker image is also not based on any other image and includes a minimal set
|
||||
of runtime dependencies that are required to use Nix:
|
||||
|
||||
- pkgs.nix
|
||||
- pkgs.bashInteractive
|
||||
- pkgs.coreutils-full
|
||||
- pkgs.gnutar
|
||||
- pkgs.gzip
|
||||
- pkgs.gnugrep
|
||||
- pkgs.which
|
||||
- pkgs.curl
|
||||
- pkgs.less
|
||||
- pkgs.wget
|
||||
- pkgs.man
|
||||
- pkgs.cacert.out
|
||||
- pkgs.findutils
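
For a rough sense of how such an image is put together, the sketch below shows a
minimal `dockerTools.buildLayeredImage` call with a package set similar to the list
above. It is illustrative only: the image name, tag and package selection are
placeholders, and the authoritative definition is the `docker.nix` file in the Nix
repository (reproduced later in this change).

```nix
# Illustrative sketch only; not the official docker.nix.
{ pkgs ? import <nixpkgs> { } }:

pkgs.dockerTools.buildLayeredImage {
  name = "nix-example";   # placeholder image name
  tag = "latest";
  contents = with pkgs; [
    nix
    bashInteractive
    coreutils-full
    cacert.out
  ];
  config = {
    Cmd = [ "${pkgs.bashInteractive}/bin/bash" ];
    Env = [ "NIX_SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt" ];
  };
}
```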
|
||||
|
||||
# Docker image with the latest development version of Nix
|
||||
|
||||
To get the latest image that was built by [Hydra](https://hydra.nixos.org), run
|
||||
the following command:
|
||||
|
||||
```console
|
||||
$ curl -L https://hydra.nixos.org/job/nix/master/dockerImage.x86_64-linux/latest/download/1 | docker load
|
||||
$ docker run -ti nix:2.5pre20211105
|
||||
```
|
||||
|
||||
You can also build a Docker image from source yourself:
|
||||
|
||||
```console
|
||||
$ nix build ./\#hydraJobs.dockerImage.x86_64-linux
|
||||
$ docker load -i ./result
|
||||
$ docker run -ti nix:2.5pre20211105
|
||||
```
|
|
@ -1,4 +1,4 @@
|
|||
# Installing Nix from Source
|
||||
|
||||
If no binary package is available, you can download and compile a source
|
||||
distribution.
|
||||
If no binary package is available or if you want to hack on Nix, you
|
||||
can build Nix from its Git repository.
|
||||
|
|
|
@ -1,14 +1,9 @@
|
|||
# Obtaining a Source Distribution
|
||||
# Obtaining the Source
|
||||
|
||||
The source tarball of the most recent stable release can be downloaded
|
||||
from the [Nix homepage](http://nixos.org/nix/download.html). You can
|
||||
also grab the [most recent development
|
||||
release](http://hydra.nixos.org/job/nix/master/release/latest-finished#tabs-constituents).
|
||||
|
||||
Alternatively, the most recent sources of Nix can be obtained from its
|
||||
[Git repository](https://github.com/NixOS/nix). For example, the
|
||||
following command will check out the latest revision into a directory
|
||||
called `nix`:
|
||||
The most recent sources of Nix can be obtained from its [Git
|
||||
repository](https://github.com/NixOS/nix). For example, the following
|
||||
command will check out the latest revision into a directory called
|
||||
`nix`:
|
||||
|
||||
```console
|
||||
$ git clone https://github.com/NixOS/nix
|
||||
|
|
|
@ -2,9 +2,8 @@
|
|||
|
||||
- GNU Autoconf (<https://www.gnu.org/software/autoconf/>) and the
|
||||
autoconf-archive macro collection
|
||||
(<https://www.gnu.org/software/autoconf-archive/>). These are only
|
||||
needed to run the bootstrap script, and are not necessary if your
|
||||
source distribution came with a pre-built `./configure` script.
|
||||
(<https://www.gnu.org/software/autoconf-archive/>). These are
|
||||
needed to run the bootstrap script.
|
||||
|
||||
- GNU Make.
|
||||
|
||||
|
@ -26,15 +25,6 @@
|
|||
available for download from the official repository
|
||||
<https://github.com/google/brotli>.
|
||||
|
||||
- The bzip2 compressor program and the `libbz2` library. Thus you must
|
||||
have bzip2 installed, including development headers and libraries.
|
||||
If your distribution does not provide these, you can obtain bzip2
|
||||
from
|
||||
<https://sourceware.org/bzip2/>.
|
||||
|
||||
- `liblzma`, which is provided by XZ Utils. If your distribution does
|
||||
not provide this, you can get it from <https://tukaani.org/xz/>.
|
||||
|
||||
- cURL and its library. If your distribution does not provide it, you
|
||||
can get it from <https://curl.haxx.se/>.
|
||||
|
||||
|
@ -61,8 +51,7 @@
|
|||
you need version 2.5.35, which is available on
|
||||
[SourceForge](http://lex.sourceforge.net/). Slightly older versions
|
||||
may also work, but ancient versions like the ubiquitous 2.5.4a
|
||||
won't. Note that these are only required if you modify the parser or
|
||||
when you are building from the Git repository.
|
||||
won't.
|
||||
|
||||
- The `libseccomp` library is used to provide syscall filtering on Linux. This
|
||||
is an optional dependency and can be disabled by passing a
|
||||
|
|
|
@ -76,7 +76,7 @@ there after an upgrade. This means that you can _roll back_ to the
|
|||
old version:
|
||||
|
||||
```console
|
||||
$ nix-env --upgrade some-packages
|
||||
$ nix-env --upgrade -A nixpkgs.some-package
|
||||
$ nix-env --rollback
|
||||
```
|
||||
|
||||
|
@ -122,12 +122,12 @@ Nix expressions generally describe how to build a package from
|
|||
source, so an installation action like
|
||||
|
||||
```console
|
||||
$ nix-env --install firefox
|
||||
$ nix-env --install -A nixpkgs.firefox
|
||||
```
|
||||
|
||||
_could_ cause quite a bit of build activity, as not only Firefox but
|
||||
also all its dependencies (all the way up to the C library and the
|
||||
compiler) would have to built, at least if they are not already in the
|
||||
compiler) would have to be built, at least if they are not already in the
|
||||
Nix store. This is a _source deployment model_. For most users,
|
||||
building from source is not very pleasant as it takes far too long.
|
||||
However, Nix can automatically skip building from source and instead
|
||||
|
|
|
@ -24,7 +24,7 @@ collection; you could write your own Nix expressions based on Nixpkgs,
|
|||
or completely new ones.)
|
||||
|
||||
You can manually download the latest version of Nixpkgs from
|
||||
<http://nixos.org/nixpkgs/download.html>. However, it’s much more
|
||||
<https://github.com/NixOS/nixpkgs>. However, it’s much more
|
||||
convenient to use the Nixpkgs [*channel*](channels.md), since it makes
|
||||
it easy to stay up to date with new versions of Nixpkgs. Nixpkgs is
|
||||
automatically added to your list of “subscribed” channels when you
|
||||
|
@ -47,41 +47,45 @@ $ nix-channel --update
|
|||
You can view the set of available packages in Nixpkgs:
|
||||
|
||||
```console
|
||||
$ nix-env -qa
|
||||
aterm-2.2
|
||||
bash-3.0
|
||||
binutils-2.15
|
||||
bison-1.875d
|
||||
blackdown-1.4.2
|
||||
bzip2-1.0.2
|
||||
$ nix-env -qaP
|
||||
nixpkgs.aterm aterm-2.2
|
||||
nixpkgs.bash bash-3.0
|
||||
nixpkgs.binutils binutils-2.15
|
||||
nixpkgs.bison bison-1.875d
|
||||
nixpkgs.blackdown blackdown-1.4.2
|
||||
nixpkgs.bzip2 bzip2-1.0.2
|
||||
…
|
||||
```
|
||||
|
||||
The flag `-q` specifies a query operation, and `-a` means that you want
|
||||
The flag `-q` specifies a query operation, `-a` means that you want
|
||||
to show the “available” (i.e., installable) packages, as opposed to the
|
||||
installed packages. If you downloaded Nixpkgs yourself, or if you
|
||||
checked it out from GitHub, then you need to pass the path to your
|
||||
Nixpkgs tree using the `-f` flag:
|
||||
installed packages, and `-P` prints the attribute paths that can be used
|
||||
to unambiguously select a package for installation (listed in the first column).
|
||||
If you downloaded Nixpkgs yourself, or if you checked it out from GitHub,
|
||||
then you need to pass the path to your Nixpkgs tree using the `-f` flag:
|
||||
|
||||
```console
|
||||
$ nix-env -qaf /path/to/nixpkgs
|
||||
$ nix-env -qaPf /path/to/nixpkgs
|
||||
aterm aterm-2.2
|
||||
bash bash-3.0
|
||||
…
|
||||
```
|
||||
|
||||
where */path/to/nixpkgs* is where you’ve unpacked or checked out
|
||||
Nixpkgs.
|
||||
|
||||
You can select specific packages by name:
|
||||
You can filter the packages by name:
|
||||
|
||||
```console
|
||||
$ nix-env -qa firefox
|
||||
firefox-34.0.5
|
||||
firefox-with-plugins-34.0.5
|
||||
$ nix-env -qaP firefox
|
||||
nixpkgs.firefox-esr firefox-91.3.0esr
|
||||
nixpkgs.firefox firefox-94.0.1
|
||||
```
|
||||
|
||||
and using regular expressions:
|
||||
|
||||
```console
|
||||
$ nix-env -qa 'firefox.*'
|
||||
$ nix-env -qaP 'firefox.*'
|
||||
```
|
||||
|
||||
It is also possible to see the *status* of available packages, i.e.,
|
||||
|
@ -89,11 +93,11 @@ whether they are installed into the user environment and/or present in
|
|||
the system:
|
||||
|
||||
```console
|
||||
$ nix-env -qas
|
||||
$ nix-env -qaPs
|
||||
…
|
||||
-PS bash-3.0
|
||||
--S binutils-2.15
|
||||
IPS bison-1.875d
|
||||
-PS nixpkgs.bash bash-3.0
|
||||
--S nixpkgs.binutils binutils-2.15
|
||||
IPS nixpkgs.bison bison-1.875d
|
||||
…
|
||||
```
|
||||
|
||||
|
@ -106,13 +110,13 @@ which is Nix’s mechanism for doing binary deployment. It just means that
|
|||
Nix knows that it can fetch a pre-built package from somewhere
|
||||
(typically a network server) instead of building it locally.
|
||||
|
||||
You can install a package using `nix-env -i`. For instance,
|
||||
You can install a package using `nix-env -iA`. For instance,
|
||||
|
||||
```console
|
||||
$ nix-env -i subversion
|
||||
$ nix-env -iA nixpkgs.subversion
|
||||
```
|
||||
|
||||
will install the package called `subversion` (which is, of course, the
|
||||
will install the package called `subversion` from the `nixpkgs` channel (which is, of course, the
|
||||
[Subversion version management system](http://subversion.tigris.org/)).
|
||||
|
||||
> **Note**
|
||||
|
@ -122,7 +126,7 @@ will install the package called `subversion` (which is, of course, the
|
|||
> binary cache <https://cache.nixos.org>; it contains binaries for most
|
||||
> packages in Nixpkgs. Only if no binary is available in the binary
|
||||
> cache, Nix will build the package from source. So if `nix-env
|
||||
> -i subversion` results in Nix building stuff from source, then either
|
||||
> -iA nixpkgs.subversion` results in Nix building stuff from source, then either
|
||||
> the package is not built for your platform by the Nixpkgs build
|
||||
> servers, or your version of Nixpkgs is too old or too new. For
|
||||
> instance, if you have a very recent checkout of Nixpkgs, then the
|
||||
|
@ -133,7 +137,10 @@ will install the package called `subversion` (which is, of course, the
|
|||
> using a Git checkout of the Nixpkgs tree), you will get binaries for
|
||||
> most packages.
|
||||
|
||||
Naturally, packages can also be uninstalled:
|
||||
Naturally, packages can also be uninstalled. Unlike when installing, you will
|
||||
need to use the derivation name (though the version part can be omitted),
|
||||
instead of the attribute path, as `nix-env` does not record which attribute
|
||||
was used for installing:
|
||||
|
||||
```console
|
||||
$ nix-env -e subversion
|
||||
|
@ -143,7 +150,7 @@ Upgrading to a new version is just as easy. If you have a new release of
|
|||
Nix Packages, you can do:
|
||||
|
||||
```console
|
||||
$ nix-env -u subversion
|
||||
$ nix-env -uA nixpkgs.subversion
|
||||
```
|
||||
|
||||
This will *only* upgrade Subversion if there is a “newer” version in the
|
||||
|
|
|
@ -9,7 +9,7 @@ The daemon that handles binary cache requests via HTTP, `nix-serve`, is
|
|||
not part of the Nix distribution, but you can install it from Nixpkgs:
|
||||
|
||||
```console
|
||||
$ nix-env -i nix-serve
|
||||
$ nix-env -iA nixpkgs.nix-serve
|
||||
```
|
||||
|
||||
You can then start the server, listening for HTTP connections on
|
||||
|
@ -35,7 +35,7 @@ On the client side, you can tell Nix to use your binary cache using
|
|||
`--option extra-binary-caches`, e.g.:
|
||||
|
||||
```console
|
||||
$ nix-env -i firefox --option extra-binary-caches http://avalon:8080/
|
||||
$ nix-env -iA nixpkgs.firefox --option extra-binary-caches http://avalon:8080/
|
||||
```
|
||||
|
||||
The option `extra-binary-caches` tells Nix to use this binary cache in
|
||||
|
|
|
@ -44,7 +44,7 @@ collector as follows:
|
|||
$ nix-store --gc
|
||||
```
|
||||
|
||||
The behaviour of the gargage collector is affected by the
|
||||
The behaviour of the garbage collector is affected by the
|
||||
`keep-derivations` (default: true) and `keep-outputs` (default: false)
|
||||
options in the Nix configuration file. The defaults will ensure that all
|
||||
derivations that are build-time dependencies of garbage collector roots
|
||||
|
|
|
@ -39,7 +39,7 @@ just Subversion 1.1.2 (arrows in the figure indicate symlinks). This
|
|||
would be what we would obtain if we had done
|
||||
|
||||
```console
|
||||
$ nix-env -i subversion
|
||||
$ nix-env -iA nixpkgs.subversion
|
||||
```
|
||||
|
||||
on a set of Nix expressions that contained Subversion 1.1.2.
|
||||
|
@ -54,7 +54,7 @@ environment is generated based on the current one. For instance,
|
|||
generation 43 was created from generation 42 when we did
|
||||
|
||||
```console
|
||||
$ nix-env -i subversion firefox
|
||||
$ nix-env -iA nixpkgs.subversion nixpkgs.firefox
|
||||
```
|
||||
|
||||
on a set of Nix expressions that contained Firefox and a new version of
|
||||
|
@ -127,7 +127,7 @@ All `nix-env` operations work on the profile pointed to by
|
|||
(abbreviation `-p`):
|
||||
|
||||
```console
|
||||
$ nix-env -p /nix/var/nix/profiles/other-profile -i subversion
|
||||
$ nix-env -p /nix/var/nix/profiles/other-profile -iA nixpkgs.subversion
|
||||
```
|
||||
|
||||
This will *not* change the `~/.nix-profile` symlink.
|
||||
|
|
|
@ -6,7 +6,7 @@ automatically fetching any store paths in Firefox’s closure if they are
|
|||
available on the server `avalon`:
|
||||
|
||||
```console
|
||||
$ nix-env -i firefox --substituters ssh://alice@avalon
|
||||
$ nix-env -iA nixpkgs.firefox --substituters ssh://alice@avalon
|
||||
```
|
||||
|
||||
This works similarly to the binary cache substituter that Nix usually
|
||||
|
|
|
@ -19,19 +19,19 @@ to subsequent chapters.
|
|||
channel:
|
||||
|
||||
```console
|
||||
$ nix-env -qa
|
||||
docbook-xml-4.3
|
||||
docbook-xml-4.5
|
||||
firefox-33.0.2
|
||||
hello-2.9
|
||||
libxslt-1.1.28
|
||||
$ nix-env -qaP
|
||||
nixpkgs.docbook_xml_dtd_43 docbook-xml-4.3
|
||||
nixpkgs.docbook_xml_dtd_45 docbook-xml-4.5
|
||||
nixpkgs.firefox firefox-33.0.2
|
||||
nixpkgs.hello hello-2.9
|
||||
nixpkgs.libxslt libxslt-1.1.28
|
||||
…
|
||||
```
|
||||
|
||||
1. Install some packages from the channel:
|
||||
|
||||
```console
|
||||
$ nix-env -i hello
|
||||
$ nix-env -iA nixpkgs.hello
|
||||
```
|
||||
|
||||
This should download pre-built packages; it should not build them
|
||||
|
|
|
@ -1,8 +1,539 @@
|
|||
# Release 2.4 (202X-XX-XX)
|
||||
# Release 2.4 (2021-11-01)
|
||||
|
||||
- It is now an error to modify the `plugin-files` setting via a
|
||||
command-line flag that appears after the first non-flag argument
|
||||
to any command, including a subcommand to `nix`. For example,
|
||||
`nix-instantiate default.nix --plugin-files ""` must now become
|
||||
`nix-instantiate --plugin-files "" default.nix`.
|
||||
- Plugins that add new `nix` subcommands are now actually respected.
|
||||
This is the first release in more than two years and is the result of
|
||||
more than 2800 commits from 195 contributors since release 2.3.
|
||||
|
||||
## Highlights
|
||||
|
||||
* Nix's **error messages** have been improved a lot. For instance,
|
||||
evaluation errors now point out the location of the error:
|
||||
|
||||
```
|
||||
$ nix build
|
||||
error: undefined variable 'bzip3'
|
||||
|
||||
at /nix/store/449lv242z0zsgwv95a8124xi11sp419f-source/flake.nix:88:13:
|
||||
|
||||
87| [ curl
|
||||
88| bzip3 xz brotli editline
|
||||
| ^
|
||||
89| openssl sqlite
|
||||
```
|
||||
|
||||
* The **`nix` command** has seen a lot of work and is now almost at
|
||||
feature parity with the old command-line interface (the `nix-*`
|
||||
commands). It aims to be [more modern, consistent and pleasant to
|
||||
use](../contributing/cli-guideline.md) than the old CLI. It is still
|
||||
marked as experimental but its interface should not change much
|
||||
anymore in future releases.
|
||||
|
||||
* **Flakes** are a new format to package Nix-based projects in a more
|
||||
discoverable, composable, consistent and reproducible way. A flake
|
||||
is just a repository or tarball containing a file named `flake.nix`
|
||||
that specifies dependencies on other flakes and returns any Nix
|
||||
assets such as packages, Nixpkgs overlays, NixOS modules or CI
|
||||
tests. The new `nix` CLI is primarily based around flakes; for
|
||||
example, a command like `nix run nixpkgs#hello` runs the `hello`
|
||||
application from the `nixpkgs` flake.
|
||||
|
||||
Flakes are currently marked as experimental. For an introduction,
|
||||
see [this blog
|
||||
post](https://www.tweag.io/blog/2020-05-25-flakes/). For detailed
|
||||
information about flake syntax and semantics, see the [`nix flake`
|
||||
manual page](../command-ref/new-cli/nix3-flake.md).
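
  To make the format concrete, the following is a minimal sketch of a
  `flake.nix`. The `hello` package, the `nixos-21.05` branch and the
  `x86_64-linux` system are placeholders rather than anything prescribed
  by this release.

  ```nix
  # A minimal, illustrative flake.nix; names and systems are placeholders.
  {
    description = "A trivial example flake";

    inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-21.05";

    outputs = { self, nixpkgs }:
      let pkgs = nixpkgs.legacyPackages.x86_64-linux; in
      {
        packages.x86_64-linux.hello = pkgs.hello;
        defaultPackage.x86_64-linux = self.packages.x86_64-linux.hello;
      };
  }
  ```

  With flakes enabled, `nix build .#hello` in such a repository would build
  that output.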
|
||||
|
||||
* Nix's store can now be **content-addressed**, meaning that the hash
|
||||
component of a store path is the hash of the path's
|
||||
contents. Previously Nix could only build **input-addressed** store
|
||||
paths, where the hash is computed from the derivation dependency
|
||||
graph. Content-addressing allows deduplication, early cutoff in
|
||||
build systems, and unprivileged closure copying. This is still [an
|
||||
experimental
|
||||
feature](https://discourse.nixos.org/t/content-addressed-nix-call-for-testers/12881).
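
  As a hedged sketch of what opting in looks like under the experimental
  `ca-derivations` feature, a derivation can request a content-addressed
  output roughly as follows; the attribute names reflect the experimental
  interface as currently understood and should be treated as an assumption.

  ```nix
  # Hedged sketch of the experimental content-addressed derivation interface.
  with import <nixpkgs> { };

  runCommand "ca-example"
    {
      __contentAddressed = true;     # request a content-addressed output
      outputHashMode = "recursive";
      outputHashAlgo = "sha256";
    }
    ''
      echo hello > $out
    ''
  ```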
|
||||
|
||||
* The Nix manual has been converted into Markdown, making it easier to
|
||||
contribute. In addition, every `nix` subcommand now has a manual
|
||||
page, documenting every option.
|
||||
|
||||
* A new setting allows **experimental features** to be enabled
|
||||
selectively. This allows us to merge unstable features into Nix more
|
||||
quickly and do more frequent releases.
|
||||
|
||||
## Other features
|
||||
|
||||
* There are many new `nix` subcommands:
|
||||
|
||||
- `nix develop` is intended to replace `nix-shell`. It has a number
|
||||
of new features:
|
||||
|
||||
* It automatically sets the output environment variables (such as
|
||||
`$out`) to writable locations (such as `./outputs/out`).
|
||||
|
||||
* It can store the environment in a profile. This is useful for
|
||||
offline work.
|
||||
|
||||
* It can run specific phases directly. For instance, `nix develop
|
||||
--build` runs `buildPhase`.
|
||||
|
||||
- It allows dependencies in the Nix store to be "redirected" to
|
||||
arbitrary directories using the `--redirect` flag. This is
|
||||
useful if you want to hack on a package *and* some of its
|
||||
dependencies at the same time.
|
||||
|
||||
- `nix print-dev-env` prints the environment variables and bash
|
||||
functions defined by a derivation. This is useful for users of
|
||||
other shells than bash (especially with `--json`).
|
||||
|
||||
- `nix shell` was previously named `nix run` and is intended to
|
||||
replace `nix-shell -p`, but without the `stdenv` overhead. It
|
||||
simply starts a shell where some packages have been added to
|
||||
`$PATH`.
|
||||
|
||||
- `nix run` (not to be confused with the old subcommand that has
|
||||
been renamed to `nix shell`) runs an "app", a flake output that
|
||||
specifies a command to run, or an eponymous program from a
|
||||
package. For example, `nix run nixpkgs#hello` runs the `hello`
|
||||
program from the `hello` package in `nixpkgs`.
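
    An app is just a small attribute set exposed by a flake. The hedged
    sketch below shows one that `nix run .#hello` could execute; the package
    and system are placeholders.

    ```nix
    # Illustrative flake exposing an "app" output for `nix run`.
    {
      inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-21.05";

      outputs = { self, nixpkgs }: {
        apps.x86_64-linux.hello = {
          type = "app";
          program = "${nixpkgs.legacyPackages.x86_64-linux.hello}/bin/hello";
        };
      };
    }
    ```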
|
||||
|
||||
- `nix flake` is the container for flake-related operations, such as
|
||||
creating a new flake, querying the contents of a flake or updating
|
||||
flake lock files.
|
||||
|
||||
- `nix registry` allows you to query and update the flake registry,
|
||||
which maps identifiers such as `nixpkgs` to concrete flake URLs.
|
||||
|
||||
- `nix profile` is intended to replace `nix-env`. Its main advantage
|
||||
is that it keeps track of the provenance of installed packages
|
||||
(e.g. exactly which flake version a package came from). It also
|
||||
has some helpful subcommands:
|
||||
|
||||
* `nix profile history` shows what packages were added, upgraded
|
||||
or removed between each version of a profile.
|
||||
|
||||
* `nix profile diff-closures` shows the changes between the
|
||||
closures of each version of a profile. This allows you to
|
||||
discover the addition or removal of dependencies or size
|
||||
changes.
|
||||
|
||||
**Warning**: after a profile has been updated using `nix profile`,
|
||||
it is no longer usable with `nix-env`.
|
||||
|
||||
- `nix store diff-closures` shows the differences between the
|
||||
closures of two store paths in terms of the versions and sizes of
|
||||
dependencies in the closures.
|
||||
|
||||
- `nix store make-content-addressable` rewrites an arbitrary closure
|
||||
to make it content-addressed. Such paths can be copied into other
|
||||
stores without requiring signatures.
|
||||
|
||||
- `nix bundle` uses the [`nix-bundle`
|
||||
program](https://github.com/matthewbauer/nix-bundle) to convert a
|
||||
closure into a self-extracting executable.
|
||||
|
||||
- Various other replacements for the old CLI, e.g. `nix store gc`,
|
||||
`nix store delete`, `nix store repair`, `nix nar dump-path`, `nix
|
||||
store prefetch-file`, `nix store prefetch-tarball`, `nix key` and
|
||||
`nix daemon`.
|
||||
|
||||
* Nix now has an **evaluation cache** for flake outputs. For example,
|
||||
a second invocation of the command `nix run nixpkgs#firefox` will
|
||||
not need to evaluate the `firefox` attribute because it's already in
|
||||
the evaluation cache. This is made possible by the hermetic
|
||||
evaluation model of flakes.
|
||||
|
||||
* The new `--offline` flag disables substituters and causes all
|
||||
locally cached tarballs and repositories to be considered
|
||||
up-to-date.
|
||||
|
||||
* The new `--refresh` flag causes all locally cached tarballs and
|
||||
repositories to be considered out-of-date.
|
||||
|
||||
* Many `nix` subcommands now have a `--json` option to produce
|
||||
machine-readable output.
|
||||
|
||||
* `nix repl` has a new `:doc` command to show documentation about
|
||||
builtin functions (e.g. `:doc builtins.map`).
|
||||
|
||||
* Binary cache stores now have an option `index-debug-info` to create
|
||||
an index of DWARF debuginfo files for use by
|
||||
[`dwarffs`](https://github.com/edolstra/dwarffs).
|
||||
|
||||
* To support flakes, Nix now has an extensible mechanism for fetching
|
||||
source trees. Currently it has the following backends:
|
||||
|
||||
* Git repositories
|
||||
|
||||
* Mercurial repositories
|
||||
|
||||
* GitHub and GitLab repositories (an optimisation for faster
|
||||
fetching than Git)
|
||||
|
||||
* Tarballs
|
||||
|
||||
* Arbitrary directories
|
||||
|
||||
The fetcher infrastructure is exposed via flake input specifications
|
||||
and via the `fetchTree` built-in.
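
  For illustration, the sketch below expresses several of these backends as
  flake inputs. The input names and URLs are placeholders, and the exact URL
  schemes accepted by each backend may vary.

  ```nix
  # Illustrative input specifications exercising several fetcher backends.
  {
    inputs = {
      nixpkgs.url = "github:NixOS/nixpkgs/nixos-21.05-small";   # GitHub backend
      lowdown-src = {                                           # plain Git backend
        url = "git+https://github.com/kristapsdz/lowdown";
        flake = false;
      };
      local = {                                                 # local directory backend
        url = "path:/some/local/checkout";                      # hypothetical path
        flake = false;
      };
    };
    outputs = { self, ... }: { };
  }
  ```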
|
||||
|
||||
* **Language changes**: the only new language feature is that you can
|
||||
now have antiquotations in paths, e.g. `./${foo}` instead of `./. +
|
||||
foo`.
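
  A short sketch of the difference (the `dir` name is a placeholder):

  ```nix
  # Both expressions denote the same path; the second form is new in this release.
  let
    dir = "scripts";
  in {
    old = ./. + ("/" + dir);   # Nix 2.3: path arithmetic
    new = ./${dir};            # Nix 2.4: antiquotation inside a path literal
  }
  ```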
|
||||
|
||||
* **New built-in functions**:
|
||||
|
||||
- `builtins.fetchTree` allows fetching a source tree using any
|
||||
backends supported by the fetcher infrastructure. It subsumes the
|
||||
functionality of existing built-ins like `fetchGit`,
|
||||
`fetchMercurial` and `fetchTarball`.
|
||||
|
||||
- `builtins.getFlake` fetches a flake and returns its output
|
||||
attributes. This function should not be used inside flakes! Use
|
||||
flake inputs instead.
|
||||
|
||||
- `builtins.floor` and `builtins.ceil` round a floating-point number
|
||||
down and up, respectively.
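
  The following sketch exercises the built-ins listed above. The `fetchTree`
  attributes follow the flake-input style, and both `fetchTree` and `getFlake`
  require the corresponding experimental features to be enabled; treat the
  exact attribute names as an assumption.

  ```nix
  # Illustrative uses of the new built-ins described above.
  {
    # Generic fetcher (experimental); attributes mirror flake inputs.
    tree = builtins.fetchTree {
      type = "github";
      owner = "NixOS";
      repo = "nixpkgs";
      ref = "nixos-21.05";
    };

    # Fetch a flake and use one of its outputs (not inside a flake!).
    hello = (builtins.getFlake "github:NixOS/nixpkgs/nixos-21.05")
      .legacyPackages.x86_64-linux.hello;

    # Floating-point rounding.
    down = builtins.floor 2.7;   # 2
    up   = builtins.ceil 2.2;    # 3
  }
  ```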
|
||||
|
||||
* Experimental support for recursive Nix. This means that Nix
|
||||
derivations can now call Nix to build other derivations. This is not
|
||||
in a stable state yet and is not well
|
||||
[documented](https://github.com/NixOS/nix/commit/c4d7c76b641d82b2696fef73ce0ac160043c18da).
|
||||
|
||||
* The new experimental feature `no-url-literals` disables URL
|
||||
literals. This helps to implement [RFC
|
||||
45](https://github.com/NixOS/rfcs/pull/45).
|
||||
|
||||
* Nix now uses `libarchive` to decompress and unpack tarballs and zip
|
||||
files, so `tar` is no longer required.
|
||||
|
||||
* The priority of substituters can now be overridden using the
|
||||
`priority` substituter setting (e.g. `--substituters
|
||||
'http://cache.nixos.org?priority=100 daemon?priority=10'`).
|
||||
|
||||
* `nix edit` now supports non-derivation attributes, e.g. `nix edit
|
||||
.#nixosConfigurations.bla`.
|
||||
|
||||
* The `nix` command now provides command line completion for `bash`,
|
||||
`zsh` and `fish`. Since the support for getting completions is built
|
||||
into `nix`, it's easy to add support for other shells.
|
||||
|
||||
* The new `--log-format` flag selects what Nix's output looks like. It
|
||||
defaults to a terse progress indicator. There is a new
|
||||
`internal-json` output format for use by other programs.
|
||||
|
||||
* `nix eval` has a new `--apply` flag that applies a function to the
|
||||
evaluation result.
|
||||
|
||||
* `nix eval` has a new `--write-to` flag that allows it to write a
|
||||
nested attribute set of string leaves to a corresponding directory
|
||||
tree.
|
||||
|
||||
* Memory improvements: many operations that add paths to the store or
|
||||
copy paths between stores now run in constant memory.
|
||||
|
||||
* Many `nix` commands now support the flag `--derivation` to operate
|
||||
on a `.drv` file itself instead of its outputs.
|
||||
|
||||
* There is a new store called `dummy://` that does not support
|
||||
building or adding paths. This is useful if you want to use the Nix
|
||||
evaluator but don't have a Nix store.
|
||||
|
||||
* The `ssh-ng://` store now allows substituting paths on the remote,
|
||||
as `ssh://` already did.
|
||||
|
||||
* When auto-calling a function with an ellipsis, all arguments are now
|
||||
passed.
|
||||
|
||||
* New `nix-shell` features:
|
||||
|
||||
- It preserves the `PS1` environment variable if
|
||||
`NIX_SHELL_PRESERVE_PROMPT` is set.
|
||||
|
||||
- With `-p`, it passes any `--arg`s as Nixpkgs arguments.
|
||||
|
||||
- Support for structured attributes.
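
  As a hedged sketch of the structured-attributes support mentioned above,
  assuming `mkShell` forwards the attribute to the underlying derivation:

  ```nix
  # Sketch of a shell.nix opting in to structured attributes. With
  # __structuredAttrs, attributes reach the builder as JSON (.attrs.json)
  # rather than as flattened environment variables.
  with import <nixpkgs> { };

  mkShell {
    __structuredAttrs = true;
    exampleList = [ "a" "b" "c" ];   # preserved as a real list in .attrs.json
  }
  ```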
|
||||
|
||||
* `nix-prefetch-url` has a new `--executable` flag.
|
||||
|
||||
* On `x86_64` systems, [`x86_64` microarchitecture
|
||||
levels](https://lwn.net/Articles/844831/) are mapped to additional
|
||||
system types (e.g. `x86_64-v1-linux`).
|
||||
|
||||
* The new `--eval-store` flag allows you to use a different store for
|
||||
evaluation than for building or storing the build result. This is
|
||||
primarily useful when you want to query whether something exists in
|
||||
a read-only store, such as a binary cache:
|
||||
|
||||
```
|
||||
# nix path-info --json --store https://cache.nixos.org \
|
||||
--eval-store auto nixpkgs#hello
|
||||
```
|
||||
|
||||
(Here `auto` indicates the local store.)
|
||||
|
||||
* The Nix daemon has a new low-latency mechanism for copying
|
||||
closures. This is useful when building on remote stores such as
|
||||
`ssh-ng://`.
|
||||
|
||||
* Plugins can now register `nix` subcommands.
|
||||
|
||||
## Incompatible changes
|
||||
|
||||
* The `nix` command is now marked as an experimental feature. This
|
||||
means that you need to add
|
||||
|
||||
```
|
||||
experimental-features = nix-command
|
||||
```
|
||||
|
||||
to your `nix.conf` if you want to use it, or pass
|
||||
`--extra-experimental-features nix-command` on the command line.
|
||||
|
||||
* The `nix` command no longer has a syntax for referring to packages
|
||||
in a channel. This means that the following no longer works:
|
||||
|
||||
```console
|
||||
nix build nixpkgs.hello # Nix 2.3
|
||||
```
|
||||
|
||||
Instead, you can either use the `#` syntax to select a package from
|
||||
a flake, e.g.
|
||||
|
||||
```console
|
||||
nix build nixpkgs#hello
|
||||
```
|
||||
|
||||
Or, if you want to use the `nixpkgs` channel in the `NIX_PATH`
|
||||
environment variable:
|
||||
|
||||
```console
|
||||
nix build -f '<nixpkgs>' hello
|
||||
```
|
||||
|
||||
* The old `nix run` has been renamed to `nix shell`, while there is a
|
||||
new `nix run` that runs a default command. So instead of
|
||||
|
||||
```console
|
||||
nix run nixpkgs.hello -c hello # Nix 2.3
|
||||
```
|
||||
|
||||
you should use
|
||||
|
||||
```console
|
||||
nix shell nixpkgs#hello -c hello
|
||||
```
|
||||
|
||||
or just
|
||||
|
||||
```console
|
||||
nix run nixpkgs#hello
|
||||
```
|
||||
|
||||
if the command you want to run has the same name as the package.
|
||||
|
||||
* It is now an error to modify the `plugin-files` setting via a
|
||||
command-line flag that appears after the first non-flag argument to
|
||||
any command, including a subcommand to `nix`. For example,
|
||||
`nix-instantiate default.nix --plugin-files ""` must now become
|
||||
`nix-instantiate --plugin-files "" default.nix`.
|
||||
|
||||
* We no longer release source tarballs. If you want to build from
|
||||
source, please build from the tags in the Git repository.
|
||||
|
||||
## Contributors
|
||||
|
||||
This release has contributions from
|
||||
Adam Höse,
|
||||
Albert Safin,
|
||||
Alex Kovar,
|
||||
Alex Zero,
|
||||
Alexander Bantyev,
|
||||
Alexandre Esteves,
|
||||
Alyssa Ross,
|
||||
Anatole Lucet,
|
||||
Anders Kaseorg,
|
||||
Andreas Rammhold,
|
||||
Antoine Eiche,
|
||||
Antoine Martin,
|
||||
Arnout Engelen,
|
||||
Arthur Gautier,
|
||||
aszlig,
|
||||
Ben Burdette,
|
||||
Benjamin Hipple,
|
||||
Bernardo Meurer,
|
||||
Björn Gohla,
|
||||
Bjørn Forsman,
|
||||
Bob van der Linden,
|
||||
Brian Leung,
|
||||
Brian McKenna,
|
||||
Brian Wignall,
|
||||
Bruce Toll,
|
||||
Bryan Richter,
|
||||
Calle Rosenquist,
|
||||
Calvin Loncaric,
|
||||
Carlo Nucera,
|
||||
Carlos D'Agostino,
|
||||
Chaz Schlarp,
|
||||
Christian Höppner,
|
||||
Christian Kampka,
|
||||
Chua Hou,
|
||||
Chuck,
|
||||
Cole Helbling,
|
||||
Daiderd Jordan,
|
||||
Dan Callahan,
|
||||
Dani,
|
||||
Daniel Fitzpatrick,
|
||||
Danila Fedorin,
|
||||
Daniël de Kok,
|
||||
Danny Bautista,
|
||||
DavHau,
|
||||
David McFarland,
|
||||
Dima,
|
||||
Domen Kožar,
|
||||
Dominik Schrempf,
|
||||
Dominique Martinet,
|
||||
dramforever,
|
||||
Dustin DeWeese,
|
||||
edef,
|
||||
Eelco Dolstra,
|
||||
Emilio Karakey,
|
||||
Emily,
|
||||
Eric Culp,
|
||||
Ersin Akinci,
|
||||
Fabian Möller,
|
||||
Farid Zakaria,
|
||||
Federico Pellegrin,
|
||||
Finn Behrens,
|
||||
Florian Franzen,
|
||||
Félix Baylac-Jacqué,
|
||||
Gabriel Gonzalez,
|
||||
Geoff Reedy,
|
||||
Georges Dubus,
|
||||
Graham Christensen,
|
||||
Greg Hale,
|
||||
Greg Price,
|
||||
Gregor Kleen,
|
||||
Gregory Hale,
|
||||
Griffin Smith,
|
||||
Guillaume Bouchard,
|
||||
Harald van Dijk,
|
||||
illustris,
|
||||
Ivan Zvonimir Horvat,
|
||||
Jade,
|
||||
Jake Waksbaum,
|
||||
jakobrs,
|
||||
James Ottaway,
|
||||
Jan Tojnar,
|
||||
Janne Heß,
|
||||
Jaroslavas Pocepko,
|
||||
Jarrett Keifer,
|
||||
Jeremy Schlatter,
|
||||
Joachim Breitner,
|
||||
Joe Hermaszewski,
|
||||
Joe Pea,
|
||||
John Ericson,
|
||||
Jonathan Ringer,
|
||||
Josef Kemetmüller,
|
||||
Joseph Lucas,
|
||||
Jude Taylor,
|
||||
Julian Stecklina,
|
||||
Julien Tanguy,
|
||||
Jörg Thalheim,
|
||||
Kai Wohlfahrt,
|
||||
keke,
|
||||
Keshav Kini,
|
||||
Kevin Quick,
|
||||
Kevin Stock,
|
||||
Kjetil Orbekk,
|
||||
Krzysztof Gogolewski,
|
||||
kvtb,
|
||||
Lars Mühmel,
|
||||
Leonhard Markert,
|
||||
Lily Ballard,
|
||||
Linus Heckemann,
|
||||
Lorenzo Manacorda,
|
||||
Lucas Desgouilles,
|
||||
Lucas Franceschino,
|
||||
Lucas Hoffmann,
|
||||
Luke Granger-Brown,
|
||||
Madeline Haraj,
|
||||
Marwan Aljubeh,
|
||||
Mat Marini,
|
||||
Mateusz Piotrowski,
|
||||
Matthew Bauer,
|
||||
Matthew Kenigsberg,
|
||||
Mauricio Scheffer,
|
||||
Maximilian Bosch,
|
||||
Michael Adler,
|
||||
Michael Bishop,
|
||||
Michael Fellinger,
|
||||
Michael Forney,
|
||||
Michael Reilly,
|
||||
mlatus,
|
||||
Mykola Orliuk,
|
||||
Nathan van Doorn,
|
||||
Naïm Favier,
|
||||
ng0,
|
||||
Nick Van den Broeck,
|
||||
Nicolas Stig124 Formichella,
|
||||
Niels Egberts,
|
||||
Niklas Hambüchen,
|
||||
Nikola Knezevic,
|
||||
oxalica,
|
||||
p01arst0rm,
|
||||
Pamplemousse,
|
||||
Patrick Hilhorst,
|
||||
Paul Opiyo,
|
||||
Pavol Rusnak,
|
||||
Peter Kolloch,
|
||||
Philipp Bartsch,
|
||||
Philipp Middendorf,
|
||||
Piotr Szubiakowski,
|
||||
Profpatsch,
|
||||
Puck Meerburg,
|
||||
Ricardo M. Correia,
|
||||
Rickard Nilsson,
|
||||
Robert Hensing,
|
||||
Robin Gloster,
|
||||
Rodrigo,
|
||||
Rok Garbas,
|
||||
Ronnie Ebrin,
|
||||
Rovanion Luckey,
|
||||
Ryan Burns,
|
||||
Ryan Mulligan,
|
||||
Ryne Everett,
|
||||
Sam Doshi,
|
||||
Sam Lidder,
|
||||
Samir Talwar,
|
||||
Samuel Dionne-Riel,
|
||||
Sebastian Ullrich,
|
||||
Sergei Trofimovich,
|
||||
Sevan Janiyan,
|
||||
Shao Cheng,
|
||||
Shea Levy,
|
||||
Silvan Mosberger,
|
||||
Stefan Frijters,
|
||||
Stefan Jaax,
|
||||
sternenseemann,
|
||||
Steven Shaw,
|
||||
Stéphan Kochen,
|
||||
SuperSandro2000,
|
||||
Suraj Barkale,
|
||||
Taeer Bar-Yam,
|
||||
Thomas Churchman,
|
||||
Théophane Hufschmitt,
|
||||
Timothy DeHerrera,
|
||||
Timothy Klim,
|
||||
Tobias Möst,
|
||||
Tobias Pflug,
|
||||
Tom Bereknyei,
|
||||
Travis A. Everett,
|
||||
Ujjwal Jain,
|
||||
Vladimír Čunát,
|
||||
Wil Taylor,
|
||||
Will Dietz,
|
||||
Yaroslav Bolyukin,
|
||||
Yestin L. Harrison,
|
||||
YI,
|
||||
Yorick van Pelt,
|
||||
Yuriy Taraday and
|
||||
zimbatm.
|
||||
|
|
7
doc/manual/src/release-notes/rl-next.md
Normal file
7
doc/manual/src/release-notes/rl-next.md
Normal file
|
@ -0,0 +1,7 @@
|
|||
# Release 2.5 (2021-XX-XX)
|
||||
|
||||
* Binary cache stores now have a setting `compression-level`.
|
||||
|
||||
* `nix develop` now has a flag `--unpack` to run `unpackPhase`.
|
||||
|
||||
* Lists can now be compared lexicographically using the `<` operator.
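
  A small sketch of the new comparison:

  ```nix
  # Lexicographic list comparison.
  {
    a = [ 1 2 3 ] < [ 1 2 4 ];    # true
    b = [ 1 2 ] < [ 1 2 3 ];      # true: a proper prefix compares as smaller
    c = [ "b" ] < [ "a" "z" ];    # false
  }
  ```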
|
251
docker.nix
Normal file
251
docker.nix
Normal file
|
@ -0,0 +1,251 @@
|
|||
{ pkgs ? import <nixpkgs> { }
|
||||
, lib ? pkgs.lib
|
||||
, name ? "nix"
|
||||
, tag ? "latest"
|
||||
, channelName ? "nixpkgs"
|
||||
, channelURL ? "https://nixos.org/channels/nixpkgs-unstable"
|
||||
}:
|
||||
let
|
||||
defaultPkgs = with pkgs; [
|
||||
nix
|
||||
bashInteractive
|
||||
coreutils-full
|
||||
gnutar
|
||||
gzip
|
||||
gnugrep
|
||||
which
|
||||
curl
|
||||
less
|
||||
wget
|
||||
man
|
||||
cacert.out
|
||||
findutils
|
||||
];
|
||||
|
||||
users = {
|
||||
|
||||
root = {
|
||||
uid = 0;
|
||||
shell = "/bin/bash";
|
||||
home = "/root";
|
||||
gid = 0;
|
||||
};
|
||||
|
||||
} // lib.listToAttrs (
|
||||
map
|
||||
(
|
||||
n: {
|
||||
name = "nixbld${toString n}";
|
||||
value = {
|
||||
uid = 30000 + n;
|
||||
gid = 30000;
|
||||
groups = [ "nixbld" ];
|
||||
description = "Nix build user ${toString n}";
|
||||
};
|
||||
}
|
||||
)
|
||||
(lib.lists.range 1 32)
|
||||
);
|
||||
|
||||
groups = {
|
||||
root.gid = 0;
|
||||
nixbld.gid = 30000;
|
||||
};
|
||||
|
||||
userToPasswd = (
|
||||
k:
|
||||
{ uid
|
||||
, gid ? 65534
|
||||
, home ? "/var/empty"
|
||||
, description ? ""
|
||||
, shell ? "/bin/false"
|
||||
, groups ? [ ]
|
||||
}: "${k}:x:${toString uid}:${toString gid}:${description}:${home}:${shell}"
|
||||
);
|
||||
passwdContents = (
|
||||
lib.concatStringsSep "\n"
|
||||
(lib.attrValues (lib.mapAttrs userToPasswd users))
|
||||
);
|
||||
|
||||
userToShadow = k: { ... }: "${k}:!:1::::::";
|
||||
shadowContents = (
|
||||
lib.concatStringsSep "\n"
|
||||
(lib.attrValues (lib.mapAttrs userToShadow users))
|
||||
);
|
||||
|
||||
# Map groups to members
|
||||
# {
|
||||
# group = [ "user1" "user2" ];
|
||||
# }
|
||||
groupMemberMap = (
|
||||
let
|
||||
# Create a flat list of user/group mappings
|
||||
mappings = (
|
||||
builtins.foldl'
|
||||
(
|
||||
acc: user:
|
||||
let
|
||||
groups = users.${user}.groups or [ ];
|
||||
in
|
||||
acc ++ map
|
||||
(group: {
|
||||
inherit user group;
|
||||
})
|
||||
groups
|
||||
)
|
||||
[ ]
|
||||
(lib.attrNames users)
|
||||
);
|
||||
in
|
||||
(
|
||||
builtins.foldl'
|
||||
(
|
||||
acc: v: acc // {
|
||||
${v.group} = acc.${v.group} or [ ] ++ [ v.user ];
|
||||
}
|
||||
)
|
||||
{ }
|
||||
mappings)
|
||||
);
|
||||
|
||||
groupToGroup = k: { gid }:
|
||||
let
|
||||
members = groupMemberMap.${k} or [ ];
|
||||
in
|
||||
"${k}:x:${toString gid}:${lib.concatStringsSep "," members}";
|
||||
groupContents = (
|
||||
lib.concatStringsSep "\n"
|
||||
(lib.attrValues (lib.mapAttrs groupToGroup groups))
|
||||
);
|
||||
|
||||
nixConf = {
|
||||
sandbox = "false";
|
||||
build-users-group = "nixbld";
|
||||
trusted-public-keys = "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=";
|
||||
};
|
||||
nixConfContents = (lib.concatStringsSep "\n" (lib.mapAttrsFlatten (n: v: "${n} = ${v}") nixConf)) + "\n";
|
||||
|
||||
baseSystem =
|
||||
let
|
||||
nixpkgs = pkgs.path;
|
||||
channel = pkgs.runCommand "channel-nixos" { } ''
|
||||
mkdir $out
|
||||
ln -s ${nixpkgs} $out/nixpkgs
|
||||
echo "[]" > $out/manifest.nix
|
||||
'';
|
||||
rootEnv = pkgs.buildPackages.buildEnv {
|
||||
name = "root-profile-env";
|
||||
paths = defaultPkgs;
|
||||
};
|
||||
profile = pkgs.buildPackages.runCommand "user-environment" { } ''
|
||||
mkdir $out
|
||||
cp -a ${rootEnv}/* $out/
|
||||
|
||||
cat > $out/manifest.nix <<EOF
|
||||
[
|
||||
${lib.concatStringsSep "\n" (builtins.map (drv: let
|
||||
outputs = drv.outputsToInstall or [ "out" ];
|
||||
in ''
|
||||
{
|
||||
${lib.concatStringsSep "\n" (builtins.map (output: ''
|
||||
${output} = { outPath = "${lib.getOutput output drv}"; };
|
||||
'') outputs)}
|
||||
outputs = [ ${lib.concatStringsSep " " (builtins.map (x: "\"${x}\"") outputs)} ];
|
||||
name = "${drv.name}";
|
||||
outPath = "${drv}";
|
||||
system = "${drv.system}";
|
||||
type = "derivation";
|
||||
meta = { };
|
||||
}
|
||||
'') defaultPkgs)}
|
||||
]
|
||||
EOF
|
||||
'';
|
||||
in
|
||||
pkgs.runCommand "base-system"
|
||||
{
|
||||
inherit passwdContents groupContents shadowContents nixConfContents;
|
||||
passAsFile = [
|
||||
"passwdContents"
|
||||
"groupContents"
|
||||
"shadowContents"
|
||||
"nixConfContents"
|
||||
];
|
||||
allowSubstitutes = false;
|
||||
preferLocalBuild = true;
|
||||
} ''
|
||||
env
|
||||
set -x
|
||||
mkdir -p $out/etc
|
||||
|
||||
cat $passwdContentsPath > $out/etc/passwd
|
||||
echo "" >> $out/etc/passwd
|
||||
|
||||
cat $groupContentsPath > $out/etc/group
|
||||
echo "" >> $out/etc/group
|
||||
|
||||
cat $shadowContentsPath > $out/etc/shadow
|
||||
echo "" >> $out/etc/shadow
|
||||
|
||||
mkdir -p $out/usr
|
||||
ln -s /nix/var/nix/profiles/share $out/usr/
|
||||
|
||||
mkdir -p $out/nix/var/nix/gcroots
|
||||
|
||||
mkdir $out/tmp
|
||||
|
||||
mkdir -p $out/etc/nix
|
||||
cat $nixConfContentsPath > $out/etc/nix/nix.conf
|
||||
|
||||
mkdir -p $out/root
|
||||
mkdir -p $out/nix/var/nix/profiles/per-user/root
|
||||
|
||||
ln -s ${profile} $out/nix/var/nix/profiles/default-1-link
|
||||
ln -s $out/nix/var/nix/profiles/default-1-link $out/nix/var/nix/profiles/default
|
||||
ln -s /nix/var/nix/profiles/default $out/root/.nix-profile
|
||||
|
||||
ln -s ${channel} $out/nix/var/nix/profiles/per-user/root/channels-1-link
|
||||
ln -s $out/nix/var/nix/profiles/per-user/root/channels-1-link $out/nix/var/nix/profiles/per-user/root/channels
|
||||
|
||||
mkdir -p $out/root/.nix-defexpr
|
||||
ln -s $out/nix/var/nix/profiles/per-user/root/channels $out/root/.nix-defexpr/channels
|
||||
echo "${channelURL} ${channelName}" > $out/root/.nix-channels
|
||||
|
||||
mkdir -p $out/bin $out/usr/bin
|
||||
ln -s ${pkgs.coreutils}/bin/env $out/usr/bin/env
|
||||
ln -s ${pkgs.bashInteractive}/bin/bash $out/bin/sh
|
||||
'';
|
||||
|
||||
in
|
||||
pkgs.dockerTools.buildLayeredImageWithNixDb {
|
||||
|
||||
inherit name tag;
|
||||
|
||||
contents = [ baseSystem ];
|
||||
|
||||
extraCommands = ''
|
||||
rm -rf nix-support
|
||||
ln -s /nix/var/nix/profiles nix/var/nix/gcroots/profiles
|
||||
'';
|
||||
|
||||
config = {
|
||||
Cmd = [ "/root/.nix-profile/bin/bash" ];
|
||||
Env = [
|
||||
"USER=root"
|
||||
"PATH=${lib.concatStringsSep ":" [
|
||||
"/root/.nix-profile/bin"
|
||||
"/nix/var/nix/profiles/default/bin"
|
||||
"/nix/var/nix/profiles/default/sbin"
|
||||
]}"
|
||||
"MANPATH=${lib.concatStringsSep ":" [
|
||||
"/root/.nix-profile/share/man"
|
||||
"/nix/var/nix/profiles/default/share/man"
|
||||
]}"
|
||||
"SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
|
||||
"GIT_SSL_CAINFO=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
|
||||
"NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
|
||||
"NIX_PATH=/nix/var/nix/profiles/per-user/root/channels:/root/.nix-defexpr/channels"
|
||||
];
|
||||
};
|
||||
|
||||
}
|
15
flake.lock
15
flake.lock
|
@ -3,32 +3,31 @@
|
|||
"lowdown-src": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1617481909,
|
||||
"narHash": "sha256-SqnfOFuLuVRRNeVJr1yeEPJue/qWoCp5N6o5Kr///p4=",
|
||||
"lastModified": 1633514407,
|
||||
"narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
|
||||
"owner": "kristapsdz",
|
||||
"repo": "lowdown",
|
||||
"rev": "148f9b2f586c41b7e36e73009db43ea68c7a1a4d",
|
||||
"rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "kristapsdz",
|
||||
"ref": "VERSION_0_8_4",
|
||||
"repo": "lowdown",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1614309161,
|
||||
"narHash": "sha256-93kRxDPyEW9QIpxU71kCaV1r+hgOgP6/aVgC7vvO8IU=",
|
||||
"lastModified": 1632864508,
|
||||
"narHash": "sha256-d127FIvGR41XbVRDPVvozUPQ/uRHbHwvfyKHwEt5xFM=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "0e499fde7af3c28d63e9b13636716b86c3162b93",
|
||||
"rev": "82891b5e2c2359d7e58d08849e4c89511ab94234",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"id": "nixpkgs",
|
||||
"ref": "nixos-20.09-small",
|
||||
"ref": "nixos-21.05-small",
|
||||
"type": "indirect"
|
||||
}
|
||||
},
|
||||
|
|
337
flake.nix
337
flake.nix
|
@ -1,8 +1,8 @@
|
|||
{
|
||||
description = "The purely functional package manager";
|
||||
|
||||
inputs.nixpkgs.url = "nixpkgs/nixos-20.09-small";
|
||||
inputs.lowdown-src = { url = "github:kristapsdz/lowdown/VERSION_0_8_4"; flake = false; };
|
||||
inputs.nixpkgs.url = "nixpkgs/nixos-21.05-small";
|
||||
inputs.lowdown-src = { url = "github:kristapsdz/lowdown"; flake = false; };
|
||||
|
||||
outputs = { self, nixpkgs, lowdown-src }:
|
||||
|
||||
|
@ -18,7 +18,9 @@
|
|||
|
||||
linux64BitSystems = [ "x86_64-linux" "aarch64-linux" ];
|
||||
linuxSystems = linux64BitSystems ++ [ "i686-linux" ];
|
||||
systems = linuxSystems ++ [ "x86_64-darwin" ];
|
||||
systems = linuxSystems ++ [ "x86_64-darwin" "aarch64-darwin" ];
|
||||
|
||||
crossSystems = [ "armv6l-linux" "armv7l-linux" ];
|
||||
|
||||
forAllSystems = f: nixpkgs.lib.genAttrs systems (system: f system);
|
||||
|
||||
|
@ -59,6 +61,7 @@
|
|||
|
||||
configureFlags =
|
||||
lib.optionals stdenv.isLinux [
|
||||
"--with-boost=${boost}/lib"
|
||||
"--with-sandbox-shell=${sh}/bin/busybox"
|
||||
"LDFLAGS=-fuse-ld=gold"
|
||||
];
|
||||
|
@ -68,7 +71,7 @@
|
|||
[
|
||||
buildPackages.bison
|
||||
buildPackages.flex
|
||||
(lib.getBin buildPackages.lowdown)
|
||||
(lib.getBin buildPackages.lowdown-nix)
|
||||
buildPackages.mdbook
|
||||
buildPackages.autoconf-archive
|
||||
buildPackages.autoreconfHook
|
||||
|
@ -76,10 +79,10 @@
|
|||
|
||||
# Tests
|
||||
buildPackages.git
|
||||
buildPackages.mercurial
|
||||
buildPackages.mercurial # FIXME: remove? only needed for tests
|
||||
buildPackages.jq
|
||||
]
|
||||
++ lib.optionals stdenv.isLinux [(pkgs.util-linuxMinimal or pkgs.utillinuxMinimal)];
|
||||
++ lib.optionals stdenv.hostPlatform.isLinux [(buildPackages.util-linuxMinimal or buildPackages.utillinuxMinimal)];
|
||||
|
||||
buildDeps =
|
||||
[ curl
|
||||
|
@ -87,13 +90,12 @@
|
|||
openssl sqlite
|
||||
libarchive
|
||||
boost
|
||||
nlohmann_json
|
||||
lowdown
|
||||
gmock
|
||||
lowdown-nix
|
||||
gtest
|
||||
]
|
||||
++ lib.optionals stdenv.isLinux [libseccomp]
|
||||
++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
|
||||
++ lib.optional stdenv.isx86_64 libcpuid;
|
||||
++ lib.optional stdenv.hostPlatform.isx86_64 libcpuid;
|
||||
|
||||
awsDeps = lib.optional (stdenv.isLinux || stdenv.isDarwin)
|
||||
(aws-sdk-cpp.override {
|
||||
|
@ -102,7 +104,13 @@
|
|||
});
|
||||
|
||||
propagatedDeps =
|
||||
[ (boehmgc.override { enableLargeConfig = true; })
|
||||
[ ((boehmgc.override {
|
||||
enableLargeConfig = true;
|
||||
}).overrideAttrs(o: {
|
||||
patches = (o.patches or []) ++ [
|
||||
./boehmgc-coroutine-sp-fallback.diff
|
||||
];
|
||||
}))
|
||||
];
|
||||
|
||||
perlDeps =
|
||||
|
@ -119,8 +127,7 @@
|
|||
''
|
||||
mkdir -p $out/nix-support
|
||||
|
||||
# Converts /nix/store/50p3qk8kka9dl6wyq40vydq945k0j3kv-nix-2.4pre20201102_550e11f/bin/nix
|
||||
# To 50p3qk8kka9dl6wyq40vydq945k0j3kv/bin/nix
|
||||
# Converts /nix/store/50p3qk8k...-nix-2.4pre20201102_550e11f/bin/nix to 50p3qk8k.../bin/nix.
|
||||
tarballPath() {
|
||||
# Remove the store prefix
|
||||
local path=''${1#${builtins.storeDir}/}
|
||||
|
@ -133,10 +140,11 @@
|
|||
|
||||
substitute ${./scripts/install.in} $out/install \
|
||||
${pkgs.lib.concatMapStrings
|
||||
(system:
|
||||
'' \
|
||||
--replace '@tarballHash_${system}@' $(nix --experimental-features nix-command hash-file --base16 --type sha256 ${self.hydraJobs.binaryTarball.${system}}/*.tar.xz) \
|
||||
--replace '@tarballPath_${system}@' $(tarballPath ${self.hydraJobs.binaryTarball.${system}}/*.tar.xz) \
|
||||
(system: let
|
||||
tarball = if builtins.elem system crossSystems then self.hydraJobs.binaryTarballCross.x86_64-linux.${system} else self.hydraJobs.binaryTarball.${system};
|
||||
in '' \
|
||||
--replace '@tarballHash_${system}@' $(nix --experimental-features nix-command hash-file --base16 --type sha256 ${tarball}/*.tar.xz) \
|
||||
--replace '@tarballPath_${system}@' $(tarballPath ${tarball}/*.tar.xz) \
|
||||
''
|
||||
)
|
||||
systems
|
||||
|
@ -145,13 +153,15 @@
|
|||
echo "file installer $out/install" >> $out/nix-support/hydra-build-products
|
||||
'';
|
||||
|
||||
testNixVersions = pkgs: client: daemon: with commonDeps pkgs; pkgs.stdenv.mkDerivation {
|
||||
testNixVersions = pkgs: client: daemon: with commonDeps pkgs; with pkgs.lib; pkgs.stdenv.mkDerivation {
|
||||
NIX_DAEMON_PACKAGE = daemon;
|
||||
NIX_CLIENT_PACKAGE = client;
|
||||
# Must keep this name short as OSX has a rather strict limit on the
|
||||
# socket path length, and this name appears in the path of the
|
||||
# nix-daemon socket used in the tests
|
||||
name = "nix-tests";
|
||||
name =
|
||||
"nix-tests"
|
||||
+ optionalString
|
||||
(versionAtLeast daemon.version "2.4pre20211005" &&
|
||||
versionAtLeast client.version "2.4pre20211005")
|
||||
"-${client.version}-against-${daemon.version}";
|
||||
inherit version;
|
||||
|
||||
src = self;
|
||||
|
@ -170,21 +180,92 @@
|
|||
installPhase = ''
|
||||
mkdir -p $out
|
||||
'';
|
||||
installCheckPhase = "make installcheck";
|
||||
|
||||
installCheckPhase = "make installcheck -j$NIX_BUILD_CORES -l$NIX_BUILD_CORES";
|
||||
};
|
||||
|
||||
binaryTarball = buildPackages: nix: pkgs: let
|
||||
inherit (pkgs) cacert;
|
||||
installerClosureInfo = buildPackages.closureInfo { rootPaths = [ nix cacert ]; };
|
||||
in
|
||||
|
||||
buildPackages.runCommand "nix-binary-tarball-${version}"
|
||||
{ #nativeBuildInputs = lib.optional (system != "aarch64-linux") shellcheck;
|
||||
meta.description = "Distribution-independent Nix bootstrap binaries for ${pkgs.system}";
|
||||
}
|
||||
''
|
||||
cp ${installerClosureInfo}/registration $TMPDIR/reginfo
|
||||
cp ${./scripts/create-darwin-volume.sh} $TMPDIR/create-darwin-volume.sh
|
||||
substitute ${./scripts/install-nix-from-closure.sh} $TMPDIR/install \
|
||||
--subst-var-by nix ${nix} \
|
||||
--subst-var-by cacert ${cacert}
|
||||
|
||||
substitute ${./scripts/install-darwin-multi-user.sh} $TMPDIR/install-darwin-multi-user.sh \
|
||||
--subst-var-by nix ${nix} \
|
||||
--subst-var-by cacert ${cacert}
|
||||
substitute ${./scripts/install-systemd-multi-user.sh} $TMPDIR/install-systemd-multi-user.sh \
|
||||
--subst-var-by nix ${nix} \
|
||||
--subst-var-by cacert ${cacert}
|
||||
substitute ${./scripts/install-multi-user.sh} $TMPDIR/install-multi-user \
|
||||
--subst-var-by nix ${nix} \
|
||||
--subst-var-by cacert ${cacert}
|
||||
|
||||
if type -p shellcheck; then
|
||||
# SC1090: Don't worry about not being able to find
|
||||
# $nix/etc/profile.d/nix.sh
|
||||
shellcheck --exclude SC1090 $TMPDIR/install
|
||||
shellcheck $TMPDIR/create-darwin-volume.sh
|
||||
shellcheck $TMPDIR/install-darwin-multi-user.sh
|
||||
shellcheck $TMPDIR/install-systemd-multi-user.sh
|
||||
|
||||
# SC1091: Don't panic about not being able to source
|
||||
# /etc/profile
|
||||
# SC2002: Ignore "useless cat" "error", when loading
|
||||
# .reginfo, as the cat is a much cleaner
|
||||
# implementation, even though it is "useless"
|
||||
# SC2116: Allow ROOT_HOME=$(echo ~root) for resolving
|
||||
# root's home directory
|
||||
shellcheck --external-sources \
|
||||
--exclude SC1091,SC2002,SC2116 $TMPDIR/install-multi-user
|
||||
fi
|
||||
|
||||
chmod +x $TMPDIR/install
|
||||
chmod +x $TMPDIR/create-darwin-volume.sh
|
||||
chmod +x $TMPDIR/install-darwin-multi-user.sh
|
||||
chmod +x $TMPDIR/install-systemd-multi-user.sh
|
||||
chmod +x $TMPDIR/install-multi-user
|
||||
dir=nix-${version}-${pkgs.system}
|
||||
fn=$out/$dir.tar.xz
|
||||
mkdir -p $out/nix-support
|
||||
echo "file binary-dist $fn" >> $out/nix-support/hydra-build-products
|
||||
tar cvfJ $fn \
|
||||
--owner=0 --group=0 --mode=u+rw,uga+r \
|
||||
--absolute-names \
|
||||
--hard-dereference \
|
||||
--transform "s,$TMPDIR/install,$dir/install," \
|
||||
--transform "s,$TMPDIR/create-darwin-volume.sh,$dir/create-darwin-volume.sh," \
|
||||
--transform "s,$TMPDIR/reginfo,$dir/.reginfo," \
|
||||
--transform "s,$NIX_STORE,$dir/store,S" \
|
||||
$TMPDIR/install \
|
||||
$TMPDIR/create-darwin-volume.sh \
|
||||
$TMPDIR/install-darwin-multi-user.sh \
|
||||
$TMPDIR/install-systemd-multi-user.sh \
|
||||
$TMPDIR/install-multi-user \
|
||||
$TMPDIR/reginfo \
|
||||
$(cat ${installerClosureInfo}/store-paths)
|
||||
'';
|
||||
|
||||
in {
|
||||
|
||||
# A Nixpkgs overlay that overrides the 'nix' and
|
||||
# 'nix.perl-bindings' packages.
|
||||
overlay = final: prev: {
|
||||
|
||||
# An older version of Nix to test against when using the daemon.
|
||||
# Currently using `nixUnstable` as the stable one doesn't respect
|
||||
# `NIX_DAEMON_SOCKET_PATH` which is needed for the tests.
|
||||
nixStable = prev.nix;
|
||||
|
||||
# Forward from the previous stage as we don’t want it to pick the lowdown override
|
||||
nixUnstable = prev.nixUnstable;
|
||||
|
||||
nix = with final; with commonDeps pkgs; stdenv.mkDerivation {
|
||||
name = "nix-${version}";
|
||||
inherit version;
|
||||
|
@ -254,9 +335,9 @@
|
|||
xz
|
||||
pkgs.perl
|
||||
boost
|
||||
nlohmann_json
|
||||
]
|
||||
++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium;
|
||||
++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
|
||||
++ lib.optional stdenv.isDarwin darwin.apple_sdk.frameworks.Security;
|
||||
|
||||
configureFlags = ''
|
||||
--with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix}
|
||||
|
@ -270,24 +351,17 @@
|
|||
|
||||
};
|
||||
|
||||
lowdown = with final; stdenv.mkDerivation rec {
|
||||
name = "lowdown-0.8.4";
|
||||
|
||||
/*
|
||||
src = fetchurl {
|
||||
url = "https://kristaps.bsd.lv/lowdown/snapshots/${name}.tar.gz";
|
||||
hash = "sha512-U9WeGoInT9vrawwa57t6u9dEdRge4/P+0wLxmQyOL9nhzOEUU2FRz2Be9H0dCjYE7p2v3vCXIYk40M+jjULATw==";
|
||||
};
|
||||
*/
|
||||
lowdown-nix = with final; stdenv.mkDerivation rec {
|
||||
name = "lowdown-0.9.0";
|
||||
|
||||
src = lowdown-src;
|
||||
|
||||
outputs = [ "out" "bin" "dev" ];
|
||||
|
||||
nativeBuildInputs = [ which ];
|
||||
nativeBuildInputs = [ buildPackages.which ];
|
||||
|
||||
configurePhase =
|
||||
''
|
||||
configurePhase = ''
|
||||
${if (stdenv.isDarwin && stdenv.isAarch64) then "echo \"HAVE_SANDBOX_INIT=false\" > configure.local" else ""}
|
||||
./configure \
|
||||
PREFIX=${placeholder "dev"} \
|
||||
BINDIR=${placeholder "bin"}/bin
|
||||
|
@ -303,92 +377,48 @@
|
|||
|
||||
buildStatic = nixpkgs.lib.genAttrs linux64BitSystems (system: self.packages.${system}.nix-static);
|
||||
|
||||
buildCross = nixpkgs.lib.genAttrs crossSystems (crossSystem:
|
||||
nixpkgs.lib.genAttrs ["x86_64-linux"] (system: self.packages.${system}."nix-${crossSystem}"));
|
||||
|
||||
# Perl bindings for various platforms.
|
||||
perlBindings = nixpkgs.lib.genAttrs systems (system: self.packages.${system}.nix.perl-bindings);
|
||||
|
||||
# Binary tarball for various platforms, containing a Nix store
|
||||
# with the closure of 'nix' package, and the second half of
|
||||
# the installation script.
|
||||
binaryTarball = nixpkgs.lib.genAttrs systems (system:
|
||||
binaryTarball = nixpkgs.lib.genAttrs systems (system: binaryTarball nixpkgsFor.${system} nixpkgsFor.${system}.nix nixpkgsFor.${system});
|
||||
|
||||
with nixpkgsFor.${system};
|
||||
|
||||
let
|
||||
installerClosureInfo = closureInfo { rootPaths = [ nix cacert ]; };
|
||||
in
|
||||
|
||||
runCommand "nix-binary-tarball-${version}"
|
||||
{ #nativeBuildInputs = lib.optional (system != "aarch64-linux") shellcheck;
|
||||
meta.description = "Distribution-independent Nix bootstrap binaries for ${system}";
|
||||
}
|
||||
''
|
||||
cp ${installerClosureInfo}/registration $TMPDIR/reginfo
|
||||
cp ${./scripts/create-darwin-volume.sh} $TMPDIR/create-darwin-volume.sh
|
||||
substitute ${./scripts/install-nix-from-closure.sh} $TMPDIR/install \
|
||||
--subst-var-by nix ${nix} \
|
||||
--subst-var-by cacert ${cacert}
|
||||
|
||||
substitute ${./scripts/install-darwin-multi-user.sh} $TMPDIR/install-darwin-multi-user.sh \
|
||||
--subst-var-by nix ${nix} \
|
||||
--subst-var-by cacert ${cacert}
|
||||
substitute ${./scripts/install-systemd-multi-user.sh} $TMPDIR/install-systemd-multi-user.sh \
|
||||
--subst-var-by nix ${nix} \
|
||||
--subst-var-by cacert ${cacert}
|
||||
substitute ${./scripts/install-multi-user.sh} $TMPDIR/install-multi-user \
|
||||
--subst-var-by nix ${nix} \
|
||||
--subst-var-by cacert ${cacert}
|
||||
|
||||
if type -p shellcheck; then
|
||||
# SC1090: Don't worry about not being able to find
|
||||
# $nix/etc/profile.d/nix.sh
|
||||
shellcheck --exclude SC1090 $TMPDIR/install
|
||||
shellcheck $TMPDIR/create-darwin-volume.sh
|
||||
shellcheck $TMPDIR/install-darwin-multi-user.sh
|
||||
shellcheck $TMPDIR/install-systemd-multi-user.sh
|
||||
|
||||
# SC1091: Don't panic about not being able to source
|
||||
# /etc/profile
|
||||
# SC2002: Ignore "useless cat" "error", when loading
|
||||
# .reginfo, as the cat is a much cleaner
|
||||
# implementation, even though it is "useless"
|
||||
# SC2116: Allow ROOT_HOME=$(echo ~root) for resolving
|
||||
# root's home directory
|
||||
shellcheck --external-sources \
|
||||
--exclude SC1091,SC2002,SC2116 $TMPDIR/install-multi-user
|
||||
fi
|
||||
|
||||
chmod +x $TMPDIR/install
|
||||
chmod +x $TMPDIR/create-darwin-volume.sh
|
||||
chmod +x $TMPDIR/install-darwin-multi-user.sh
|
||||
chmod +x $TMPDIR/install-systemd-multi-user.sh
|
||||
chmod +x $TMPDIR/install-multi-user
|
||||
dir=nix-${version}-${system}
|
||||
fn=$out/$dir.tar.xz
|
||||
mkdir -p $out/nix-support
|
||||
echo "file binary-dist $fn" >> $out/nix-support/hydra-build-products
|
||||
tar cvfJ $fn \
|
||||
--owner=0 --group=0 --mode=u+rw,uga+r \
|
||||
--absolute-names \
|
||||
--hard-dereference \
|
||||
--transform "s,$TMPDIR/install,$dir/install," \
|
||||
--transform "s,$TMPDIR/create-darwin-volume.sh,$dir/create-darwin-volume.sh," \
|
||||
--transform "s,$TMPDIR/reginfo,$dir/.reginfo," \
|
||||
--transform "s,$NIX_STORE,$dir/store,S" \
|
||||
$TMPDIR/install \
|
||||
$TMPDIR/create-darwin-volume.sh \
|
||||
$TMPDIR/install-darwin-multi-user.sh \
|
||||
$TMPDIR/install-systemd-multi-user.sh \
|
||||
$TMPDIR/install-multi-user \
|
||||
$TMPDIR/reginfo \
|
||||
$(cat ${installerClosureInfo}/store-paths)
|
||||
'');
|
||||
binaryTarballCross = nixpkgs.lib.genAttrs ["x86_64-linux"] (system: builtins.listToAttrs (map (crossSystem: {
|
||||
name = crossSystem;
|
||||
value = let
|
||||
nixpkgsCross = import nixpkgs {
|
||||
inherit system crossSystem;
|
||||
overlays = [ self.overlay ];
|
||||
};
|
||||
in binaryTarball nixpkgsFor.${system} self.packages.${system}."nix-${crossSystem}" nixpkgsCross;
|
||||
}) crossSystems));
|
||||
|
||||
# The first half of the installation script. This is uploaded
|
||||
# to https://nixos.org/nix/install. It downloads the binary
|
||||
# tarball for the user's system and calls the second half of the
|
||||
# installation script.
|
||||
installerScript = installScriptFor [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ];
|
||||
installerScriptForGHA = installScriptFor [ "x86_64-linux" "x86_64-darwin" ];
|
||||
installerScript = installScriptFor [ "x86_64-linux" "i686-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin" "armv6l-linux" "armv7l-linux" ];
|
||||
installerScriptForGHA = installScriptFor [ "x86_64-linux" "x86_64-darwin" "armv6l-linux" "armv7l-linux"];
|
||||
|
||||
# docker image with Nix inside
|
||||
dockerImage = nixpkgs.lib.genAttrs linux64BitSystems (system:
|
||||
let
|
||||
pkgs = nixpkgsFor.${system};
|
||||
image = import ./docker.nix { inherit pkgs; tag = version; };
|
||||
in pkgs.runCommand "docker-image-tarball-${version}"
|
||||
{ meta.description = "Docker image with Nix for ${system}";
|
||||
}
|
||||
''
|
||||
mkdir -p $out/nix-support
|
||||
image=$out/image.tar.gz
|
||||
ln -s ${image} $image
|
||||
echo "file binary-dist $image" >> $out/nix-support/hydra-build-products
|
||||
'');
|
||||
|
||||
# Line coverage analysis.
|
||||
coverage =
|
||||
|
@ -430,6 +460,12 @@
|
|||
inherit (self) overlay;
|
||||
};
|
||||
|
||||
tests.nssPreload = (import ./tests/nss-preload.nix rec {
|
||||
system = "x86_64-linux";
|
||||
inherit nixpkgs;
|
||||
inherit (self) overlay;
|
||||
});
|
||||
|
||||
tests.githubFlakes = (import ./tests/github-flakes.nix rec {
|
||||
system = "x86_64-linux";
|
||||
inherit nixpkgs;
|
||||
|
@ -468,25 +504,33 @@
|
|||
'';
|
||||
*/
|
||||
|
||||
installTests = forAllSystems (system:
|
||||
let pkgs = nixpkgsFor.${system}; in
|
||||
pkgs.runCommand "install-tests" {
|
||||
againstSelf = testNixVersions pkgs pkgs.nix pkgs.pkgs.nix;
|
||||
againstCurrentUnstable =
|
||||
# FIXME: temporarily disable this on macOS because of #3605.
|
||||
if system == "x86_64-linux"
|
||||
then testNixVersions pkgs pkgs.nix pkgs.nixUnstable
|
||||
else null;
|
||||
# Disabled because the latest stable version doesn't handle
|
||||
# `NIX_DAEMON_SOCKET_PATH` which is required for the tests to work
|
||||
# againstLatestStable = testNixVersions pkgs pkgs.nix pkgs.nixStable;
|
||||
} "touch $out");
|
||||
|
||||
};
|
||||
|
||||
checks = forAllSystems (system: {
|
||||
binaryTarball = self.hydraJobs.binaryTarball.${system};
|
||||
perlBindings = self.hydraJobs.perlBindings.${system};
|
||||
installTests =
|
||||
let pkgs = nixpkgsFor.${system}; in
|
||||
pkgs.runCommand "install-tests" {
|
||||
againstSelf = testNixVersions pkgs pkgs.nix pkgs.pkgs.nix;
|
||||
againstCurrentUnstable = testNixVersions pkgs pkgs.nix pkgs.nixUnstable;
|
||||
# Disabled because the latest stable version doesn't handle
|
||||
# `NIX_DAEMON_SOCKET_PATH` which is required for the tests to work
|
||||
# againstLatestStable = testNixVersions pkgs pkgs.nix pkgs.nixStable;
|
||||
} "touch $out";
|
||||
});
|
||||
installTests = self.hydraJobs.installTests.${system};
|
||||
} // (if system == "x86_64-linux" then {
|
||||
dockerImage = self.hydraJobs.dockerImage.${system};
|
||||
} else {}));
|
||||
|
||||
packages = forAllSystems (system: {
|
||||
inherit (nixpkgsFor.${system}) nix;
|
||||
} // nixpkgs.lib.optionalAttrs (builtins.elem system linux64BitSystems) {
|
||||
} // (nixpkgs.lib.optionalAttrs (builtins.elem system linux64BitSystems) {
|
||||
nix-static = let
|
||||
nixpkgs = nixpkgsFor.${system}.pkgsStatic;
|
||||
in with commonDeps nixpkgs; nixpkgs.stdenv.mkDerivation {
|
||||
|
@ -524,8 +568,49 @@
|
|||
stripAllList = ["bin"];
|
||||
|
||||
strictDeps = true;
|
||||
|
||||
hardeningDisable = [ "pie" ];
|
||||
};
|
||||
});
|
||||
} // builtins.listToAttrs (map (crossSystem: {
|
||||
name = "nix-${crossSystem}";
|
||||
value = let
|
||||
nixpkgsCross = import nixpkgs {
|
||||
inherit system crossSystem;
|
||||
overlays = [ self.overlay ];
|
||||
};
|
||||
in with commonDeps nixpkgsCross; nixpkgsCross.stdenv.mkDerivation {
|
||||
name = "nix-${version}";
|
||||
|
||||
src = self;
|
||||
|
||||
VERSION_SUFFIX = versionSuffix;
|
||||
|
||||
outputs = [ "out" "dev" "doc" ];
|
||||
|
||||
nativeBuildInputs = nativeBuildDeps;
|
||||
buildInputs = buildDeps ++ propagatedDeps;
|
||||
|
||||
configureFlags = [ "--sysconfdir=/etc" "--disable-doc-gen" ];
|
||||
|
||||
enableParallelBuilding = true;
|
||||
|
||||
makeFlags = "profiledir=$(out)/etc/profile.d";
|
||||
|
||||
doCheck = true;
|
||||
|
||||
installFlags = "sysconfdir=$(out)/etc";
|
||||
|
||||
postInstall = ''
|
||||
mkdir -p $doc/nix-support
|
||||
echo "doc manual $doc/share/doc/nix/manual" >> $doc/nix-support/hydra-build-products
|
||||
mkdir -p $out/nix-support
|
||||
echo "file binary-dist $out/bin/nix" >> $out/nix-support/hydra-build-products
|
||||
'';
|
||||
|
||||
doInstallCheck = true;
|
||||
installCheckFlags = "sysconfdir=$(out)/etc";
|
||||
};
|
||||
}) crossSystems)));
|
||||
|
||||
defaultPackage = forAllSystems (system: self.packages.${system}.nix);
|
||||
|
||||
|
|
|
@ -19,6 +19,8 @@ my $nixpkgsDir = "/home/eelco/Dev/nixpkgs-pristine";
|
|||
|
||||
my $TMPDIR = $ENV{'TMPDIR'} // "/tmp";
|
||||
|
||||
my $isLatest = ($ENV{'IS_LATEST'} // "") eq "1";
|
||||
|
||||
# FIXME: cut&paste from nixos-channel-scripts.
|
||||
sub fetch {
|
||||
my ($url, $type) = @_;
|
||||
|
@ -35,16 +37,18 @@ sub fetch {
|
|||
my $evalUrl = "https://hydra.nixos.org/eval/$evalId";
|
||||
my $evalInfo = decode_json(fetch($evalUrl, 'application/json'));
|
||||
#print Dumper($evalInfo);
|
||||
my $flakeUrl = $evalInfo->{flake} or die;
|
||||
my $flakeInfo = decode_json(`nix flake metadata --json "$flakeUrl"` or die);
|
||||
my $nixRev = $flakeInfo->{revision} or die;
|
||||
|
||||
my $nixRev = $evalInfo->{jobsetevalinputs}->{nix}->{revision} or die;
|
||||
my $buildInfo = decode_json(fetch("$evalUrl/job/build.x86_64-linux", 'application/json'));
|
||||
#print Dumper($buildInfo);
|
||||
|
||||
my $tarballInfo = decode_json(fetch("$evalUrl/job/tarball", 'application/json'));
|
||||
|
||||
my $releaseName = $tarballInfo->{releasename};
|
||||
my $releaseName = $buildInfo->{nixname};
|
||||
$releaseName =~ /nix-(.*)$/ or die;
|
||||
my $version = $1;
|
||||
|
||||
print STDERR "Nix revision is $nixRev, version is $version\n";
|
||||
print STDERR "Flake URL is $flakeUrl, Nix revision is $nixRev, version is $version\n";
|
||||
|
||||
my $releaseDir = "nix/$releaseName";
|
||||
|
||||
|
@ -83,12 +87,12 @@ sub downloadFile {
|
|||
|
||||
if (!-e $tmpFile) {
|
||||
print STDERR "downloading $srcFile to $tmpFile...\n";
|
||||
system("NIX_REMOTE=https://cache.nixos.org/ nix cat-store '$srcFile' > '$tmpFile'") == 0
|
||||
system("NIX_REMOTE=https://cache.nixos.org/ nix store cat '$srcFile' > '$tmpFile'") == 0
|
||||
or die "unable to fetch $srcFile\n";
|
||||
}
|
||||
|
||||
my $sha256_expected = $buildInfo->{buildproducts}->{$productNr}->{sha256hash} or die;
|
||||
my $sha256_actual = `nix hash-file --base16 --type sha256 '$tmpFile'`;
|
||||
my $sha256_actual = `nix hash file --base16 --type sha256 '$tmpFile'`;
|
||||
chomp $sha256_actual;
|
||||
if ($sha256_expected ne $sha256_actual) {
|
||||
print STDERR "file $tmpFile is corrupt, got $sha256_actual, expected $sha256_expected\n";
|
||||
|
@ -104,12 +108,13 @@ sub downloadFile {
|
|||
return $sha256_expected;
|
||||
}
|
||||
|
||||
downloadFile("tarball", "2"); # .tar.bz2
|
||||
my $tarballHash = downloadFile("tarball", "3"); # .tar.xz
|
||||
downloadFile("binaryTarball.i686-linux", "1");
|
||||
downloadFile("binaryTarball.x86_64-linux", "1");
|
||||
downloadFile("binaryTarball.aarch64-linux", "1");
|
||||
downloadFile("binaryTarball.x86_64-darwin", "1");
|
||||
downloadFile("binaryTarball.aarch64-darwin", "1");
|
||||
downloadFile("binaryTarballCross.x86_64-linux.armv6l-linux", "1");
|
||||
downloadFile("binaryTarballCross.x86_64-linux.armv7l-linux", "1");
|
||||
downloadFile("installerScript", "1");
|
||||
|
||||
for my $fn (glob "$tmpDir/*") {
|
||||
|
@ -131,53 +136,38 @@ for my $fn (glob "$tmpDir/*") {
|
|||
}
|
||||
}
|
||||
|
||||
exit if $version =~ /pre/;
|
||||
# Update nix-fallback-paths.nix.
|
||||
if ($isLatest) {
|
||||
system("cd $nixpkgsDir && git pull") == 0 or die;
|
||||
|
||||
# Update Nixpkgs in a very hacky way.
|
||||
system("cd $nixpkgsDir && git pull") == 0 or die;
|
||||
my $oldName = `nix-instantiate --eval $nixpkgsDir -A nix.name`; chomp $oldName;
|
||||
my $oldHash = `nix-instantiate --eval $nixpkgsDir -A nix.src.outputHash`; chomp $oldHash;
|
||||
print STDERR "old stable version in Nixpkgs = $oldName / $oldHash\n";
|
||||
|
||||
my $fn = "$nixpkgsDir/pkgs/tools/package-management/nix/default.nix";
|
||||
my $oldFile = read_file($fn);
|
||||
$oldFile =~ s/$oldName/"$releaseName"/g;
|
||||
$oldFile =~ s/$oldHash/"$tarballHash"/g;
|
||||
write_file($fn, $oldFile);
|
||||
|
||||
$oldName =~ s/nix-//g;
|
||||
$oldName =~ s/"//g;
|
||||
|
||||
sub getStorePath {
|
||||
my ($jobName) = @_;
|
||||
my $buildInfo = decode_json(fetch("$evalUrl/job/$jobName", 'application/json'));
|
||||
for my $product (values %{$buildInfo->{buildproducts}}) {
|
||||
next unless $product->{type} eq "nix-build";
|
||||
next if $product->{path} =~ /[a-z]+$/;
|
||||
return $product->{path};
|
||||
sub getStorePath {
|
||||
my ($jobName) = @_;
|
||||
my $buildInfo = decode_json(fetch("$evalUrl/job/$jobName", 'application/json'));
|
||||
return $buildInfo->{buildoutputs}->{out}->{path} or die "cannot get store path for '$jobName'";
|
||||
}
|
||||
die;
|
||||
|
||||
write_file("$nixpkgsDir/nixos/modules/installer/tools/nix-fallback-paths.nix",
|
||||
"{\n" .
|
||||
" x86_64-linux = \"" . getStorePath("build.x86_64-linux") . "\";\n" .
|
||||
" i686-linux = \"" . getStorePath("build.i686-linux") . "\";\n" .
|
||||
" aarch64-linux = \"" . getStorePath("build.aarch64-linux") . "\";\n" .
|
||||
" x86_64-darwin = \"" . getStorePath("build.x86_64-darwin") . "\";\n" .
|
||||
" aarch64-darwin = \"" . getStorePath("build.aarch64-darwin") . "\";\n" .
|
||||
"}\n");
|
||||
|
||||
system("cd $nixpkgsDir && git commit -a -m 'nix-fallback-paths.nix: Update to $version'") == 0 or die;
|
||||
}
|
||||
|
||||
write_file("$nixpkgsDir/nixos/modules/installer/tools/nix-fallback-paths.nix",
|
||||
"{\n" .
|
||||
" x86_64-linux = \"" . getStorePath("build.x86_64-linux") . "\";\n" .
|
||||
" i686-linux = \"" . getStorePath("build.i686-linux") . "\";\n" .
|
||||
" aarch64-linux = \"" . getStorePath("build.aarch64-linux") . "\";\n" .
|
||||
" x86_64-darwin = \"" . getStorePath("build.x86_64-darwin") . "\";\n" .
|
||||
"}\n");
|
||||
|
||||
system("cd $nixpkgsDir && git commit -a -m 'nix: $oldName -> $version'") == 0 or die;
|
||||
|
||||
# Update the "latest" symlink.
|
||||
$channelsBucket->add_key(
|
||||
"nix-latest/install", "",
|
||||
{ "x-amz-website-redirect-location" => "https://releases.nixos.org/$releaseDir/install" })
|
||||
or die $channelsBucket->err . ": " . $channelsBucket->errstr;
|
||||
or die $channelsBucket->err . ": " . $channelsBucket->errstr
|
||||
if $isLatest;
|
||||
|
||||
# Tag the release in Git.
|
||||
chdir("/home/eelco/Dev/nix-pristine") or die;
|
||||
system("git remote update origin") == 0 or die;
|
||||
system("git tag --force --sign $version $nixRev -m 'Tagging release $version'") == 0 or die;
|
||||
system("git push --tags") == 0 or die;
|
||||
system("git push --force-with-lease origin $nixRev:refs/heads/latest-release") == 0 or die;
|
||||
system("git push --force-with-lease origin $nixRev:refs/heads/latest-release") == 0 or die if $isLatest;
|
||||
|
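For reference, the new getStorePath() helper above boils down to a single Hydra API call; a rough shell equivalent (the eval id is a placeholder and the use of curl/jq here is only illustrative, not part of the script):

# Fetch the JSON description of a job in a Hydra evaluation and read the
# store path of its 'out' output, as getStorePath() does in Perl.
curl -sH 'Accept: application/json' \
  "https://hydra.nixos.org/eval/$evalId/job/build.x86_64-linux" \
  | jq -r '.buildoutputs.out.path'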
|
37
misc/fish/completion.fish
Normal file
@ -0,0 +1,37 @@
function _nix_complete
    # Get the current command up to a cursor.
    # - Behaves correctly even with pipes and nested in commands like env.
    # - TODO: Returns the command verbatim (does not interpolate variables).
    #   That might not be optimal for arguments like -f.
    set -l nix_args (commandline --current-process --tokenize --cut-at-cursor)
    # --cut-at-cursor with --tokenize removes the current token so we need to add it separately.
    # https://github.com/fish-shell/fish-shell/issues/7375
    # Can be an empty string.
    set -l current_token (commandline --current-token --cut-at-cursor)

    # Nix wants the index of the argv item to complete but the $nix_args variable
    # also contains the program name (argv[0]) so we would need to subtract 1.
    # But the variable also misses the current token so it cancels out.
    set -l nix_arg_to_complete (count $nix_args)

    env NIX_GET_COMPLETIONS=$nix_arg_to_complete $nix_args $current_token
end

function _nix_accepts_files
    set -l response (_nix_complete)
    # First line is either filenames or no-filenames.
    test $response[1] = 'filenames'
end

function _nix
    set -l response (_nix_complete)
    # Skip the first line since it is handled by _nix_accepts_files.
    # Tail lines each contain a command followed by a tab character and, optionally, a description.
    # This is also the format fish expects.
    string collect -- $response[2..-1]
end

# Disable file path completion if paths do not belong in the current context.
complete --command nix --condition 'not _nix_accepts_files' --no-files

complete --command nix --arguments '(_nix)'
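As a rough illustration of the completion protocol the functions above consume (the command and candidate below are made up; the output shape -- a first line of "filenames" or "no-filenames", then one tab-separated candidate per line -- is what the comments above describe):

# What _nix_complete would run for the command line "nix bui":
NIX_GET_COMPLETIONS=1 nix bui
# no-filenames
# build<TAB>Build a derivation or fetch a store path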
1
misc/fish/local.mk
Normal file
@ -0,0 +1 @@
$(eval $(call install-file-as, $(d)/completion.fish, $(datarootdir)/fish/vendor_completions.d/nix.fish, 0644))
@ -1,4 +1,4 @@
ifeq ($(OS), Darwin)
ifdef HOST_DARWIN

  $(eval $(call install-data-in, $(d)/org.nixos.nix-daemon.plist, $(prefix)/Library/LaunchDaemons))

@ -19,7 +19,7 @@
    <array>
        <string>/bin/sh</string>
        <string>-c</string>
        <string>/bin/wait4path /nix/var/nix/profiles/default/bin/nix-daemon && /nix/var/nix/profiles/default/bin/nix-daemon</string>
        <string>/bin/wait4path /nix/var/nix/profiles/default/bin/nix-daemon && exec /nix/var/nix/profiles/default/bin/nix-daemon</string>
    </array>
    <key>StandardErrorPath</key>
    <string>/var/log/nix-daemon.log</string>

@ -1,4 +1,4 @@
ifeq ($(OS), Linux)
ifdef HOST_LINUX

  $(foreach n, nix-daemon.socket nix-daemon.service, $(eval $(call install-file-in, $(d)/$(n), $(prefix)/lib/systemd/system, 0644)))

@ -1,4 +1,4 @@
ifeq ($(OS), Linux)
ifdef HOST_LINUX

  $(foreach n, nix-daemon.conf, $(eval $(call install-file-in, $(d)/$(n), $(sysconfdir)/init, 0644)))
@ -1,3 +1,5 @@
#compdef nix

function _nix() {
  local ifs_bk="$IFS"
  local input=("${(Q)words[@]}")
@ -18,4 +20,4 @@ function _nix() {
  _describe 'nix' suggestions
}

compdef _nix nix
_nix "$@"
|
|
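A short note on the hunk above: with the "#compdef nix" tag, zsh autoloads this file as the _nix function from $fpath and calls it directly, which is why the explicit "compdef _nix nix" registration gives way to plain "_nix "$@"". A hypothetical user setup (the $PREFIX below is a placeholder; the install path comes from the misc/zsh/local.mk rule that follows):

fpath+=("$PREFIX/share/zsh/site-functions")
autoload -Uz compinit && compinit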
1
misc/zsh/local.mk
Normal file
@ -0,0 +1 @@
$(eval $(call install-file-as, $(d)/completion.zsh, $(datarootdir)/zsh/site-functions/_nix, 0644))
27
mk/lib.mk
|
@ -10,8 +10,25 @@ bin-scripts :=
noinst-scripts :=
man-pages :=
install-tests :=
OS = $(shell uname -s)

ifdef HOST_OS
  HOST_KERNEL = $(firstword $(subst -, ,$(HOST_OS)))
  ifeq ($(HOST_KERNEL), cygwin)
    HOST_CYGWIN = 1
  endif
  ifeq ($(patsubst darwin%,,$(HOST_KERNEL)),)
    HOST_DARWIN = 1
  endif
  ifeq ($(patsubst freebsd%,,$(HOST_KERNEL)),)
    HOST_FREEBSD = 1
  endif
  ifeq ($(HOST_KERNEL), linux)
    HOST_LINUX = 1
  endif
  ifeq ($(patsubst solaris%,,$(HOST_KERNEL)),)
    HOST_SOLARIS = 1
  endif
endif

# Hack to define a literal space.
space :=
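To make the new HOST_OS detection above concrete, a small worked example (the host_os values are typical autoconf strings and are given only for illustration):

#   HOST_OS=linux-gnu     ->  HOST_KERNEL=linux         ->  HOST_LINUX=1
#   HOST_OS=darwin20.6.0  ->  HOST_KERNEL=darwin20.6.0  ->  HOST_DARWIN=1  (matches darwin%)
#   HOST_OS=cygwin        ->  HOST_KERNEL=cygwin        ->  HOST_CYGWIN=1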
|
||||
|
@ -50,16 +67,16 @@ endif
|
|||
BUILD_SHARED_LIBS ?= 1
|
||||
|
||||
ifeq ($(BUILD_SHARED_LIBS), 1)
|
||||
ifeq (CYGWIN,$(findstring CYGWIN,$(OS)))
|
||||
ifdef HOST_CYGWIN
|
||||
GLOBAL_CFLAGS += -U__STRICT_ANSI__ -D_GNU_SOURCE
|
||||
GLOBAL_CXXFLAGS += -U__STRICT_ANSI__ -D_GNU_SOURCE
|
||||
else
|
||||
GLOBAL_CFLAGS += -fPIC
|
||||
GLOBAL_CXXFLAGS += -fPIC
|
||||
endif
|
||||
ifneq ($(OS), Darwin)
|
||||
ifneq ($(OS), SunOS)
|
||||
ifneq ($(OS), FreeBSD)
|
||||
ifndef HOST_DARWIN
|
||||
ifndef HOST_SOLARIS
|
||||
ifndef HOST_FREEBSD
|
||||
GLOBAL_LDFLAGS += -Wl,--no-copy-dt-needed-entries
|
||||
endif
|
||||
endif
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
libs-list :=
|
||||
|
||||
ifeq ($(OS), Darwin)
|
||||
ifdef HOST_DARWIN
|
||||
SO_EXT = dylib
|
||||
else
|
||||
ifeq (CYGWIN,$(findstring CYGWIN,$(OS)))
|
||||
ifdef HOST_CYGWIN
|
||||
SO_EXT = dll
|
||||
else
|
||||
SO_EXT = so
|
||||
|
@ -59,7 +59,7 @@ define build-library
|
|||
$(1)_OBJS := $$(addprefix $(buildprefix), $$(addsuffix .o, $$(basename $$(_srcs))))
|
||||
_libs := $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_PATH))
|
||||
|
||||
ifeq (CYGWIN,$(findstring CYGWIN,$(OS)))
|
||||
ifdef HOST_CYGWIN
|
||||
$(1)_INSTALL_DIR ?= $$(bindir)
|
||||
else
|
||||
$(1)_INSTALL_DIR ?= $$(libdir)
|
||||
|
@ -73,18 +73,18 @@ define build-library
|
|||
ifeq ($(BUILD_SHARED_LIBS), 1)
|
||||
|
||||
ifdef $(1)_ALLOW_UNDEFINED
|
||||
ifeq ($(OS), Darwin)
|
||||
ifdef HOST_DARWIN
|
||||
$(1)_LDFLAGS += -undefined suppress -flat_namespace
|
||||
endif
|
||||
else
|
||||
ifneq ($(OS), Darwin)
|
||||
ifneq (CYGWIN,$(findstring CYGWIN,$(OS)))
|
||||
ifndef HOST_DARWIN
|
||||
ifndef HOST_CYGWIN
|
||||
$(1)_LDFLAGS += -Wl,-z,defs
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
|
||||
ifneq ($(OS), Darwin)
|
||||
ifndef HOST_DARWIN
|
||||
$(1)_LDFLAGS += -Wl,-soname=$$($(1)_NAME).$(SO_EXT)
|
||||
endif
|
||||
|
||||
|
@ -93,7 +93,7 @@ define build-library
|
|||
$$($(1)_PATH): $$($(1)_OBJS) $$(_libs) | $$(_d)/
|
||||
$$(trace-ld) $(CXX) -o $$(abspath $$@) -shared $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$($(1)_LDFLAGS_PROPAGATED) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE)) $$($(1)_LDFLAGS_UNINSTALLED)
|
||||
|
||||
ifneq ($(OS), Darwin)
|
||||
ifndef HOST_DARWIN
|
||||
$(1)_LDFLAGS_USE += -Wl,-rpath,$$(abspath $$(_d))
|
||||
endif
|
||||
$(1)_LDFLAGS_USE += -L$$(_d) -l$$(patsubst lib%,%,$$(strip $$($(1)_NAME)))
|
||||
|
@ -108,7 +108,7 @@ define build-library
|
|||
$$(trace-ld) $(CXX) -o $$@ -shared $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$($(1)_LDFLAGS_PROPAGATED) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE_INSTALLED))
|
||||
|
||||
$(1)_LDFLAGS_USE_INSTALLED += -L$$(DESTDIR)$$($(1)_INSTALL_DIR) -l$$(patsubst lib%,%,$$(strip $$($(1)_NAME)))
|
||||
ifneq ($(OS), Darwin)
|
||||
ifndef HOST_DARWIN
|
||||
ifeq ($(SET_RPATH_TO_LIBS), 1)
|
||||
$(1)_LDFLAGS_USE_INSTALLED += -Wl,-rpath,$$($(1)_INSTALL_DIR)
|
||||
else
|
||||
|
@ -125,8 +125,8 @@ define build-library
|
|||
$(1)_PATH := $$(_d)/$$($(1)_NAME).a
|
||||
|
||||
$$($(1)_PATH): $$($(1)_OBJS) | $$(_d)/
|
||||
$(trace-ld) $(LD) -Ur -o $$(_d)/$$($(1)_NAME).o $$?
|
||||
$(trace-ar) $(AR) crs $$@ $$(_d)/$$($(1)_NAME).o
|
||||
$$(trace-ld) $(LD) -Ur -o $$(_d)/$$($(1)_NAME).o $$?
|
||||
$$(trace-ar) $(AR) crs $$@ $$(_d)/$$($(1)_NAME).o
|
||||
|
||||
$(1)_LDFLAGS_USE += $$($(1)_PATH) $$($(1)_LDFLAGS)
|
||||
|
||||
|
|
|
@ -13,3 +13,7 @@ define run-install-test
|
|||
endef
|
||||
|
||||
.PHONY: check installcheck
|
||||
|
||||
print-top-help += \
|
||||
echo " check: Run unit tests"; \
|
||||
echo " installcheck: Run functional tests";
|
||||
|
|
|
@ -8,10 +8,15 @@ endif
|
|||
|
||||
libnixrust_PATH := $(d)/target/$(RUST_DIR)/libnixrust.$(SO_EXT)
|
||||
libnixrust_INSTALL_PATH := $(libdir)/libnixrust.$(SO_EXT)
|
||||
libnixrust_LDFLAGS_USE := -L$(d)/target/$(RUST_DIR) -lnixrust -ldl
|
||||
libnixrust_LDFLAGS_USE_INSTALLED := -L$(libdir) -lnixrust -ldl
|
||||
libnixrust_LDFLAGS_USE := -L$(d)/target/$(RUST_DIR) -lnixrust
|
||||
libnixrust_LDFLAGS_USE_INSTALLED := -L$(libdir) -lnixrust
|
||||
|
||||
ifeq ($(OS), Darwin)
|
||||
ifdef HOST_LINUX
|
||||
libnixrust_LDFLAGS_USE += -ldl
|
||||
libnixrust_LDFLAGS_USE_INSTALLED += -ldl
|
||||
endif
|
||||
|
||||
ifdef HOST_DARWIN
|
||||
libnixrust_BUILD_FLAGS = NIX_LDFLAGS="-undefined dynamic_lookup"
|
||||
else
|
||||
libnixrust_LDFLAGS_USE += -Wl,-rpath,$(abspath $(d)/target/$(RUST_DIR))
|
||||
|
@ -26,7 +31,7 @@ $(libnixrust_PATH): $(call rwildcard, $(d)/src, *.rs) $(d)/Cargo.toml
|
|||
|
||||
$(libnixrust_INSTALL_PATH): $(libnixrust_PATH)
|
||||
$(target-gen) cp $^ $@
|
||||
ifeq ($(OS), Darwin)
|
||||
ifdef HOST_DARWIN
|
||||
install_name_tool -id $@ $@
|
||||
endif
|
||||
|
||||
|
@ -35,7 +40,7 @@ clean: clean-rust
|
|||
clean-rust:
|
||||
$(suppress) rm -rfv nix-rust/target
|
||||
|
||||
ifneq ($(OS), Darwin)
|
||||
ifndef HOST_DARWIN
|
||||
check: rust-tests
|
||||
|
||||
rust-tests:
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
makefiles = local.mk
|
||||
|
||||
GLOBAL_CXXFLAGS += -g -Wall -std=c++17
|
||||
GLOBAL_CXXFLAGS += -g -Wall -std=c++17 -I ../src
|
||||
|
||||
-include Makefile.config
|
||||
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
HOST_OS = @host_os@
|
||||
CC = @CC@
|
||||
CFLAGS = @CFLAGS@
|
||||
CXX = @CXX@
|
||||
|
|
|
@ -7,6 +7,8 @@ CXXFLAGS=
|
|||
AC_PROG_CC
|
||||
AC_PROG_CXX
|
||||
|
||||
AC_CANONICAL_HOST
|
||||
|
||||
# Use 64-bit file system calls so that we can support files > 2 GiB.
|
||||
AC_SYS_LARGEFILE
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package Nix::Config;
|
||||
|
||||
use MIME::Base64;
|
||||
use Nix::Store;
|
||||
|
||||
$version = "@PACKAGE_VERSION@";
|
||||
|
||||
|
|
|
@ -22,6 +22,7 @@ our @EXPORT = qw(
|
|||
derivationFromPath
|
||||
addTempRoot
|
||||
getBinDir getStoreDir
|
||||
queryRawRealisation
|
||||
);
|
||||
|
||||
our $VERSION = '0.15';
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
#include "crypto.hh"
|
||||
|
||||
#include <sodium.h>
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
|
||||
using namespace nix;
|
||||
|
@ -120,6 +121,18 @@ SV * queryPathInfo(char * path, int base32)
|
|||
croak("%s", e.what());
|
||||
}
|
||||
|
||||
SV * queryRawRealisation(char * outputId)
|
||||
PPCODE:
|
||||
try {
|
||||
auto realisation = store()->queryRealisation(DrvOutput::parse(outputId));
|
||||
if (realisation)
|
||||
XPUSHs(sv_2mortal(newSVpv(realisation->toJSON().dump().c_str(), 0)));
|
||||
else
|
||||
XPUSHs(sv_2mortal(newSVpv("", 0)));
|
||||
} catch (Error & e) {
|
||||
croak("%s", e.what());
|
||||
}
|
||||
|
||||
|
||||
SV * queryPathFromHashPart(char * hashPart)
|
||||
PPCODE:
|
||||
|
|
|
@ -28,7 +28,7 @@ Store_CXXFLAGS = \
|
|||
|
||||
Store_LDFLAGS := $(SODIUM_LIBS) $(NIX_LIBS)
|
||||
|
||||
ifeq (CYGWIN,$(findstring CYGWIN,$(OS)))
|
||||
ifdef HOST_CYGWIN
|
||||
archlib = $(shell perl -E 'use Config; print $$Config{archlib};')
|
||||
libperl = $(shell perl -E 'use Config; print $$Config{libperl};')
|
||||
Store_LDFLAGS += $(shell find ${archlib} -name ${libperl})
|
||||
|
|
|
@ -1,33 +1,262 @@
|
|||
#!/bin/sh
|
||||
set -e
|
||||
#!/usr/bin/env bash
|
||||
set -eu
|
||||
set -o pipefail
|
||||
|
||||
root_disk() {
|
||||
diskutil info -plist /
|
||||
}
|
||||
# I'm a little agnostic on the choices, but supporting a wide
|
||||
# slate of uses for now, including:
|
||||
# - import-only: `. create-darwin-volume.sh no-main[ ...]`
|
||||
# - legacy: `./create-darwin-volume.sh` or `. create-darwin-volume.sh`
|
||||
# (both will run main())
|
||||
# - external alt-routine: `./create-darwin-volume.sh no-main func[ ...]`
|
||||
if [ "${1-}" = "no-main" ]; then
|
||||
shift
|
||||
readonly _CREATE_VOLUME_NO_MAIN=1
|
||||
else
|
||||
readonly _CREATE_VOLUME_NO_MAIN=0
|
||||
# declare some things we expect to inherit from install-multi-user
|
||||
# I don't love this (because it's a bit of a kludge).
|
||||
#
|
||||
# CAUTION: (Dec 19 2020)
|
||||
# This is a stopgap. It doesn't cover the full slate of
|
||||
# identifiers we inherit--just those necessary to:
|
||||
# - avoid breaking direct invocations of this script (here/now)
|
||||
# - avoid hard-to-reverse structural changes before the call to rm
|
||||
# single-user support is verified
|
||||
#
|
||||
# In the near-mid term, I (personally) think we should:
|
||||
# - decide to deprecate the direct call and add a notice
|
||||
# - fold all of this into install-darwin-multi-user.sh
|
||||
# - intentionally remove the old direct-invocation form (kill the
|
||||
# routine, replace this script w/ deprecation notice and a note
|
||||
# on the remove-after date)
|
||||
#
|
||||
readonly NIX_ROOT="${NIX_ROOT:-/nix}"
|
||||
|
||||
# i.e., "disk1"
|
||||
_sudo() {
|
||||
shift # throw away the 'explanation'
|
||||
/usr/bin/sudo "$@"
|
||||
}
|
||||
failure() {
|
||||
if [ "$*" = "" ]; then
|
||||
cat
|
||||
else
|
||||
echo "$@"
|
||||
fi
|
||||
exit 1
|
||||
}
|
||||
task() {
|
||||
echo "$@"
|
||||
}
|
||||
fi
|
||||
|
||||
# usually "disk1"
|
||||
root_disk_identifier() {
|
||||
diskutil info -plist / | xmllint --xpath "/plist/dict/key[text()='ParentWholeDisk']/following-sibling::string[1]/text()" -
|
||||
# For performance (~10ms vs 280ms) I'm parsing 'diskX' from stat output
|
||||
# (~diskXsY)--but I'm retaining the more-semantic approach since
|
||||
# it documents intent better.
|
||||
# /usr/sbin/diskutil info -plist / | xmllint --xpath "/plist/dict/key[text()='ParentWholeDisk']/following-sibling::string[1]/text()" -
|
||||
#
|
||||
local special_device
|
||||
special_device="$(/usr/bin/stat -f "%Sd" /)"
|
||||
echo "${special_device%s[0-9]*}"
|
||||
}
|
||||
|
||||
find_nix_volume() {
|
||||
diskutil apfs list -plist "$1" | xmllint --xpath "(/plist/dict/array/dict/key[text()='Volumes']/following-sibling::array/dict/key[text()='Name']/following-sibling::string[starts-with(translate(text(),'N','n'),'nix')]/text())[1]" - 2>/dev/null || true
|
||||
# make it easy to play w/ 'Case-sensitive APFS'
|
||||
readonly NIX_VOLUME_FS="${NIX_VOLUME_FS:-APFS}"
|
||||
readonly NIX_VOLUME_LABEL="${NIX_VOLUME_LABEL:-Nix Store}"
|
||||
# Strongly assuming we'll make a volume on the device / is on
|
||||
# But you can override NIX_VOLUME_USE_DISK to create it on some other device
|
||||
readonly NIX_VOLUME_USE_DISK="${NIX_VOLUME_USE_DISK:-$(root_disk_identifier)}"
|
||||
NIX_VOLUME_USE_SPECIAL="${NIX_VOLUME_USE_SPECIAL:-}"
|
||||
NIX_VOLUME_USE_UUID="${NIX_VOLUME_USE_UUID:-}"
|
||||
readonly NIX_VOLUME_MOUNTD_DEST="${NIX_VOLUME_MOUNTD_DEST:-/Library/LaunchDaemons/org.nixos.darwin-store.plist}"
|
||||
|
||||
if /usr/bin/fdesetup isactive >/dev/null; then
|
||||
test_filevault_in_use() { return 0; }
|
||||
# no readonly; we may modify if user refuses from cure_volume
|
||||
NIX_VOLUME_DO_ENCRYPT="${NIX_VOLUME_DO_ENCRYPT:-1}"
|
||||
else
|
||||
test_filevault_in_use() { return 1; }
|
||||
NIX_VOLUME_DO_ENCRYPT="${NIX_VOLUME_DO_ENCRYPT:-0}"
|
||||
fi
|
||||
|
||||
should_encrypt_volume() {
|
||||
test_filevault_in_use && (( NIX_VOLUME_DO_ENCRYPT == 1 ))
|
||||
}
|
||||
|
||||
substep() {
|
||||
printf " %s\n" "" "- $1" "" "${@:2}"
|
||||
}
|
||||
|
||||
|
||||
volumes_labeled() {
|
||||
local label="$1"
|
||||
xsltproc --novalid --stringparam label "$label" - <(/usr/sbin/ioreg -ra -c "AppleAPFSVolume") <<'EOF'
|
||||
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
|
||||
<xsl:output method="text"/>
|
||||
<xsl:template match="/">
|
||||
<xsl:apply-templates select="/plist/array/dict/key[text()='IORegistryEntryName']/following-sibling::*[1][text()=$label]/.."/>
|
||||
</xsl:template>
|
||||
<xsl:template match="dict">
|
||||
<xsl:apply-templates match="string" select="key[text()='BSD Name']/following-sibling::*[1]"/>
|
||||
<xsl:text>=</xsl:text>
|
||||
<xsl:apply-templates match="string" select="key[text()='UUID']/following-sibling::*[1]"/>
|
||||
<xsl:text>
</xsl:text>
|
||||
</xsl:template>
|
||||
</xsl:stylesheet>
|
||||
EOF
|
||||
# I cut label out of the extracted values, but here it is for reference:
|
||||
# <xsl:apply-templates match="string" select="key[text()='IORegistryEntryName']/following-sibling::*[1]"/>
|
||||
# <xsl:text>=</xsl:text>
|
||||
}
|
||||
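For orientation, the function above emits one "BSD Name=UUID" pair per matching volume; a hypothetical run (the identifiers are invented):

# volumes_labeled "Nix Store"
# disk1s7=0A1B2C3D-0000-1111-2222-333344445555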
|
||||
right_disk() {
|
||||
local volume_special="$1" # (i.e., disk1s7)
|
||||
[[ "$volume_special" == "$NIX_VOLUME_USE_DISK"s* ]]
|
||||
}
|
||||
|
||||
right_volume() {
|
||||
local volume_special="$1" # (i.e., disk1s7)
|
||||
# if set, it must match; otherwise ensure it's on the right disk
|
||||
if [ -z "$NIX_VOLUME_USE_SPECIAL" ]; then
|
||||
if right_disk "$volume_special"; then
|
||||
NIX_VOLUME_USE_SPECIAL="$volume_special" # latch on
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
else
|
||||
[ "$volume_special" = "$NIX_VOLUME_USE_SPECIAL" ]
|
||||
fi
|
||||
}
|
||||
|
||||
right_uuid() {
|
||||
local volume_uuid="$1"
|
||||
# if set, it must match; otherwise allow
|
||||
if [ -z "$NIX_VOLUME_USE_UUID" ]; then
|
||||
NIX_VOLUME_USE_UUID="$volume_uuid" # latch on
|
||||
return 0
|
||||
else
|
||||
[ "$volume_uuid" = "$NIX_VOLUME_USE_UUID" ]
|
||||
fi
|
||||
}
|
||||
|
||||
cure_volumes() {
|
||||
local found volume special uuid
|
||||
# loop just in case they have more than one volume
|
||||
# (nothing stops you from doing this)
|
||||
for volume in $(volumes_labeled "$NIX_VOLUME_LABEL"); do
|
||||
# CAUTION: this could (maybe) be a more normal read
|
||||
# loop like:
|
||||
# while IFS== read -r special uuid; do
|
||||
# # ...
|
||||
# done <<<"$(volumes_labeled "$NIX_VOLUME_LABEL")"
|
||||
#
|
||||
# I did it with for to skirt a problem with the obvious
|
||||
# pattern replacing stdin and causing user prompts
|
||||
# inside (which also use read and access stdin) to skip
|
||||
#
|
||||
# If there's an existing encrypted volume we can't find
|
||||
# in keychain, the user never gets prompted to delete
|
||||
# the volume, and the install fails.
|
||||
#
|
||||
# If you change this, a human needs to test a very
|
||||
# specific scenario: you already have an encrypted
|
||||
# Nix Store volume, and have deleted its credential
|
||||
# from keychain. Ensure the script asks you if it can
|
||||
# delete the volume, and then prompts for your sudo
|
||||
# password to confirm.
|
||||
#
|
||||
# shellcheck disable=SC1097
|
||||
IFS== read -r special uuid <<< "$volume"
|
||||
# take the first one that's on the right disk
|
||||
if [ -z "${found:-}" ]; then
|
||||
if right_volume "$special" && right_uuid "$uuid"; then
|
||||
cure_volume "$special" "$uuid"
|
||||
found="${special} (${uuid})"
|
||||
else
|
||||
warning <<EOF
|
||||
Ignoring ${special} (${uuid}) because I am looking for:
|
||||
disk=${NIX_VOLUME_USE_DISK} special=${NIX_VOLUME_USE_SPECIAL:-${NIX_VOLUME_USE_DISK}sX} uuid=${NIX_VOLUME_USE_UUID:-any}
|
||||
EOF
|
||||
# TODO: give chance to delete if ! headless?
|
||||
fi
|
||||
else
|
||||
warning <<EOF
|
||||
Ignoring ${special} (${uuid}), already found target: $found
|
||||
EOF
|
||||
# TODO reminder? I feel like I want one
|
||||
# idiom that reminds some warnings, or warns
|
||||
# some reminders?
|
||||
# TODO: if ! headless, chance to delete?
|
||||
fi
|
||||
done
|
||||
if [ -z "${found:-}" ]; then
|
||||
readonly NIX_VOLUME_USE_SPECIAL NIX_VOLUME_USE_UUID
|
||||
fi
|
||||
}
|
||||
|
||||
volume_encrypted() {
|
||||
local volume_special="$1" # (i.e., disk1s7)
|
||||
# Trying to match the first line of output; known first lines:
|
||||
# No cryptographic users for <special>
|
||||
# Cryptographic user for <special> (1 found)
|
||||
# Cryptographic users for <special> (2 found)
|
||||
/usr/sbin/diskutil apfs listCryptoUsers -plist "$volume_special" | /usr/bin/grep -q APFSCryptoUserUUID
|
||||
}
|
||||
|
||||
test_fstab() {
|
||||
grep -q "/nix apfs rw" /etc/fstab 2>/dev/null
|
||||
/usr/bin/grep -q "$NIX_ROOT apfs rw" /etc/fstab 2>/dev/null
|
||||
}
|
||||
|
||||
test_nix_symlink() {
|
||||
[ -L "/nix" ] || grep -q "^nix." /etc/synthetic.conf 2>/dev/null
|
||||
test_nix_root_is_symlink() {
|
||||
[ -L "$NIX_ROOT" ]
|
||||
}
|
||||
|
||||
test_synthetic_conf() {
|
||||
grep -q "^nix$" /etc/synthetic.conf 2>/dev/null
|
||||
test_synthetic_conf_either(){
|
||||
/usr/bin/grep -qE "^${NIX_ROOT:1}($|\t.{3,}$)" /etc/synthetic.conf 2>/dev/null
|
||||
}
|
||||
|
||||
test_synthetic_conf_mountable() {
|
||||
/usr/bin/grep -q "^${NIX_ROOT:1}$" /etc/synthetic.conf 2>/dev/null
|
||||
}
|
||||
|
||||
test_synthetic_conf_symlinked() {
|
||||
/usr/bin/grep -qE "^${NIX_ROOT:1}\t.{3,}$" /etc/synthetic.conf 2>/dev/null
|
||||
}
|
||||
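For reference, the two /etc/synthetic.conf forms that the tests above distinguish (the symlink target is only an example; the fields are tab-separated):

# nix                -> "mountable": macOS creates an empty /nix directory at boot
# nix<TAB>/opt/pkg   -> "symlinked": macOS creates /nix as a symlink instead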
|
||||
test_nix_volume_mountd_installed() {
|
||||
test -e "$NIX_VOLUME_MOUNTD_DEST"
|
||||
}
|
||||
|
||||
# current volume password
|
||||
test_keychain_by_uuid() {
|
||||
local volume_uuid="$1"
|
||||
# Note: doesn't need sudo just to check; doesn't output pw
|
||||
security find-generic-password -s "$volume_uuid" &>/dev/null
|
||||
}
|
||||
|
||||
get_volume_pass() {
|
||||
local volume_uuid="$1"
|
||||
_sudo \
|
||||
"to confirm keychain has a password that unlocks this volume" \
|
||||
security find-generic-password -s "$volume_uuid" -w
|
||||
}
|
||||
|
||||
verify_volume_pass() {
|
||||
local volume_special="$1" # (i.e., disk1s7)
|
||||
local volume_uuid="$2"
|
||||
/usr/sbin/diskutil apfs unlockVolume "$volume_special" -verify -stdinpassphrase -user "$volume_uuid"
|
||||
}
|
||||
|
||||
volume_pass_works() {
|
||||
local volume_special="$1" # (i.e., disk1s7)
|
||||
local volume_uuid="$2"
|
||||
get_volume_pass "$volume_uuid" | verify_volume_pass "$volume_special" "$volume_uuid"
|
||||
}
|
||||
|
||||
# Create the paths defined in synthetic.conf, saving us a reboot.
|
||||
create_synthetic_objects(){
|
||||
create_synthetic_objects() {
|
||||
# Big Sur takes away the -B flag we were using and replaces it
|
||||
# with a -t flag that appears to do the same thing (but they
|
||||
# don't behave exactly the same way in terms of return values).
|
||||
|
@ -41,129 +270,578 @@ create_synthetic_objects(){
|
|||
}
|
||||
|
||||
test_nix() {
|
||||
test -d "/nix"
|
||||
test -d "$NIX_ROOT"
|
||||
}
|
||||
|
||||
test_t2_chip_present(){
|
||||
# Use xartutil to see if system has a t2 chip.
|
||||
#
|
||||
# This isn't well-documented on its own; until it is,
|
||||
# let's keep track of knowledge/assumptions.
|
||||
#
|
||||
# Warnings:
|
||||
# - Don't search "xart" if porn will cause you trouble :)
|
||||
# - Other xartutil flags do dangerous things. Don't run them
|
||||
# naively. If you must, search "xartutil" first.
|
||||
#
|
||||
# Assumptions:
|
||||
# - the "xART session seeds recovery utility"
|
||||
# appears to interact with xartstorageremoted
|
||||
# - `sudo xartutil --list` lists xART sessions
|
||||
# and their seeds and exits 0 if successful. If
|
||||
# not, it exits 1 and prints an error such as:
|
||||
# xartutil: ERROR: No supported link to the SEP present
|
||||
# - xART sessions/seeds are present when a T2 chip is
|
||||
# (and not, otherwise)
|
||||
# - the presence of a T2 chip means a newly-created
|
||||
# volume on the primary drive will be
|
||||
# encrypted at rest
|
||||
# - all together: `sudo xartutil --list`
|
||||
# should exit 0 if a new Nix Store volume will
|
||||
# be encrypted at rest, and exit 1 if not.
|
||||
sudo xartutil --list >/dev/null 2>/dev/null
|
||||
test_voldaemon() {
|
||||
test -f "$NIX_VOLUME_MOUNTD_DEST"
|
||||
}
|
||||
|
||||
test_filevault_in_use() {
|
||||
fdesetup isactive >/dev/null
|
||||
generate_mount_command() {
|
||||
local cmd_type="$1" # encrypted|unencrypted
|
||||
local volume_uuid mountpoint cmd=()
|
||||
printf -v volume_uuid "%q" "$2"
|
||||
printf -v mountpoint "%q" "$NIX_ROOT"
|
||||
|
||||
case "$cmd_type" in
|
||||
encrypted)
|
||||
cmd=(/bin/sh -c "/usr/bin/security find-generic-password -s '$volume_uuid' -w | /usr/sbin/diskutil apfs unlockVolume '$volume_uuid' -mountpoint '$mountpoint' -stdinpassphrase");;
|
||||
unencrypted)
|
||||
cmd=(/usr/sbin/diskutil mount -mountPoint "$mountpoint" "$volume_uuid");;
|
||||
*)
|
||||
failure "Invalid first arg $cmd_type to generate_mount_command";;
|
||||
esac
|
||||
|
||||
printf " <string>%s</string>\n" "${cmd[@]}"
|
||||
}
|
||||
|
||||
# use after error msg for conditions we don't understand
|
||||
suggest_report_error(){
|
||||
# ex "error: something sad happened :(" >&2
|
||||
echo " please report this @ https://github.com/nixos/nix/issues" >&2
|
||||
generate_mount_daemon() {
|
||||
local cmd_type="$1" # encrypted|unencrypted
|
||||
local volume_uuid="$2"
|
||||
cat <<EOF
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>RunAtLoad</key>
|
||||
<true/>
|
||||
<key>Label</key>
|
||||
<string>org.nixos.darwin-store</string>
|
||||
<key>ProgramArguments</key>
|
||||
<array>
|
||||
$(generate_mount_command "$cmd_type" "$volume_uuid")
|
||||
</array>
|
||||
</dict>
|
||||
</plist>
|
||||
EOF
|
||||
}
|
||||
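As an illustration, for the "unencrypted" case the two functions above expand to ProgramArguments entries like the following (the UUID is made up):

# <string>/usr/sbin/diskutil</string>
# <string>mount</string>
# <string>-mountPoint</string>
# <string>/nix</string>
# <string>0A1B2C3D-0000-1111-2222-333344445555</string>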
|
||||
main() {
|
||||
(
|
||||
echo ""
|
||||
echo " ------------------------------------------------------------------ "
|
||||
echo " | This installer will create a volume for the nix store and |"
|
||||
echo " | configure it to mount at /nix. Follow these steps to uninstall. |"
|
||||
echo " ------------------------------------------------------------------ "
|
||||
echo ""
|
||||
echo " 1. Remove the entry from fstab using 'sudo vifs'"
|
||||
echo " 2. Destroy the data volume using 'diskutil apfs deleteVolume'"
|
||||
echo " 3. Remove the 'nix' line from /etc/synthetic.conf or the file"
|
||||
echo ""
|
||||
) >&2
|
||||
_eat_bootout_err() {
|
||||
/usr/bin/grep -v "Boot-out failed: 36: Operation now in progress"
|
||||
}
|
||||
|
||||
if test_nix_symlink; then
|
||||
echo "error: /nix is a symlink, please remove it and make sure it's not in synthetic.conf (in which case a reboot is required)" >&2
|
||||
echo " /nix -> $(readlink "/nix")" >&2
|
||||
exit 2
|
||||
# TODO: remove with --uninstall?
|
||||
uninstall_launch_daemon_directions() {
|
||||
local daemon_label="$1" # i.e., org.nixos.blah-blah
|
||||
local daemon_plist="$2" # abspath
|
||||
substep "Uninstall LaunchDaemon $daemon_label" \
|
||||
" sudo launchctl bootout system/$daemon_label" \
|
||||
" sudo rm $daemon_plist"
|
||||
}
|
||||
|
||||
uninstall_launch_daemon_prompt() {
|
||||
local daemon_label="$1" # i.e., org.nixos.blah-blah
|
||||
local daemon_plist="$2" # abspath
|
||||
local reason_for_daemon="$3"
|
||||
cat <<EOF
|
||||
|
||||
The installer adds a LaunchDaemon to $reason_for_daemon: $daemon_label
|
||||
EOF
|
||||
if ui_confirm "Can I remove it?"; then
|
||||
_sudo "to terminate the daemon" \
|
||||
launchctl bootout "system/$daemon_label" 2> >(_eat_bootout_err >&2) || true
|
||||
# this can "fail" with a message like:
|
||||
# Boot-out failed: 36: Operation now in progress
|
||||
_sudo "to remove the daemon definition" rm "$daemon_plist"
|
||||
fi
|
||||
}
|
||||
|
||||
nix_volume_mountd_uninstall_directions() {
|
||||
uninstall_launch_daemon_directions "org.nixos.darwin-store" \
|
||||
"$NIX_VOLUME_MOUNTD_DEST"
|
||||
}
|
||||
|
||||
nix_volume_mountd_uninstall_prompt() {
|
||||
uninstall_launch_daemon_prompt "org.nixos.darwin-store" \
|
||||
"$NIX_VOLUME_MOUNTD_DEST" \
|
||||
"mount your Nix volume"
|
||||
}
|
||||
|
||||
# TODO: move nix_daemon to install-darwin-multi-user if/when uninstall_launch_daemon_prompt moves up to install-multi-user
|
||||
nix_daemon_uninstall_prompt() {
|
||||
uninstall_launch_daemon_prompt "org.nixos.nix-daemon" \
|
||||
"$NIX_DAEMON_DEST" \
|
||||
"run the nix-daemon"
|
||||
}
|
||||
|
||||
# TODO: remove with --uninstall?
|
||||
nix_daemon_uninstall_directions() {
|
||||
uninstall_launch_daemon_directions "org.nixos.nix-daemon" \
|
||||
"$NIX_DAEMON_DEST"
|
||||
}
|
||||
|
||||
|
||||
# TODO: remove with --uninstall?
|
||||
synthetic_conf_uninstall_directions() {
|
||||
# :1 to strip leading slash
|
||||
substep "Remove ${NIX_ROOT:1} from /etc/synthetic.conf" \
|
||||
" If nix is the only entry: sudo rm /etc/synthetic.conf" \
|
||||
" Otherwise: sudo /usr/bin/sed -i '' -e '/^${NIX_ROOT:1}$/d' /etc/synthetic.conf"
|
||||
}
|
||||
|
||||
synthetic_conf_uninstall_prompt() {
|
||||
cat <<EOF
|
||||
|
||||
During install, I add '${NIX_ROOT:1}' to /etc/synthetic.conf, which instructs
|
||||
macOS to create an empty root directory for mounting the Nix volume.
|
||||
EOF
|
||||
# make the edit to a copy
|
||||
/usr/bin/grep -vE "^${NIX_ROOT:1}($|\t.{3,}$)" /etc/synthetic.conf > "$SCRATCH/synthetic.conf.edit"
|
||||
|
||||
if test_synthetic_conf_symlinked; then
|
||||
warning <<EOF
|
||||
|
||||
/etc/synthetic.conf already contains a line instructing your system
|
||||
to make '${NIX_ROOT}' as a symlink:
|
||||
$(/usr/bin/grep -nE "^${NIX_ROOT:1}\t.{3,}$" /etc/synthetic.conf)
|
||||
|
||||
This may mean your system has/had a non-standard Nix install.
|
||||
|
||||
The volume-creation process in this installer is *not* compatible
|
||||
with a symlinked store, so I'll have to remove this instruction to
|
||||
continue.
|
||||
|
||||
If you want/need to keep this instruction, answer 'n' to abort.
|
||||
|
||||
EOF
|
||||
fi
|
||||
|
||||
if ! test_synthetic_conf; then
|
||||
echo "Configuring /etc/synthetic.conf..." >&2
|
||||
echo nix | sudo tee -a /etc/synthetic.conf
|
||||
if ! test_synthetic_conf; then
|
||||
echo "error: failed to configure synthetic.conf;" >&2
|
||||
suggest_report_error
|
||||
exit 1
|
||||
# ask to rm if this left the file empty aside from comments, else edit
|
||||
if /usr/bin/diff -q <(:) <(/usr/bin/grep -v "^#" "$SCRATCH/synthetic.conf.edit") &>/dev/null; then
|
||||
if confirm_rm "/etc/synthetic.conf"; then
|
||||
if test_nix_root_is_symlink; then
|
||||
failure >&2 <<EOF
|
||||
I removed /etc/synthetic.conf, but $NIX_ROOT is already a symlink
|
||||
(-> $(readlink "$NIX_ROOT")). The system should remove it when you reboot.
|
||||
Once you've rebooted, run the installer again.
|
||||
EOF
|
||||
fi
|
||||
return 0
|
||||
fi
|
||||
else
|
||||
if confirm_edit "$SCRATCH/synthetic.conf.edit" "/etc/synthetic.conf"; then
|
||||
if test_nix_root_is_symlink; then
|
||||
failure >&2 <<EOF
|
||||
I edited Nix out of /etc/synthetic.conf, but $NIX_ROOT is already a symlink
|
||||
(-> $(readlink "$NIX_ROOT")). The system should remove it when you reboot.
|
||||
Once you've rebooted, run the installer again.
|
||||
EOF
|
||||
fi
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
# fallback instructions
|
||||
echo "Manually remove nix from /etc/synthetic.conf"
|
||||
return 1
|
||||
}
|
||||
|
||||
if ! test_nix; then
|
||||
echo "Creating mountpoint for /nix..." >&2
|
||||
create_synthetic_objects # the ones we defined in synthetic.conf
|
||||
if ! test_nix; then
|
||||
sudo mkdir -p /nix 2>/dev/null || true
|
||||
add_nix_vol_fstab_line() {
|
||||
local uuid="$1"
|
||||
# shellcheck disable=SC1003,SC2026
|
||||
local escaped_mountpoint="${NIX_ROOT/ /'\\\'040}"
|
||||
shift
|
||||
EDITOR="/usr/bin/ex" _sudo "to add nix to fstab" "$@" <<EOF
|
||||
:a
|
||||
UUID=$uuid $escaped_mountpoint apfs rw,noauto,nobrowse,suid,owners
|
||||
.
|
||||
:x
|
||||
EOF
|
||||
# TODO: preserving my notes on suid,owners above until resolved
|
||||
# There *may* be some issue regarding volume ownership, see nix#3156
|
||||
#
|
||||
# It seems like the cheapest fix is adding "suid,owners" to fstab, but:
|
||||
# - We don't have much info on this condition yet
|
||||
# - I'm not certain if these cause other problems?
|
||||
# - There's a "chown" component some people claim to need to fix this
|
||||
# that I don't understand yet
|
||||
# (Note however that I've had to add a chown step to handle
|
||||
# single->multi-user reinstalls, which may cover this)
|
||||
#
|
||||
# I'm not sure if it's safe to approach this way?
|
||||
#
|
||||
# I think I think the most-proper way to test for it is:
|
||||
# diskutil info -plist "$NIX_VOLUME_LABEL" | xmllint --xpath "(/plist/dict/key[text()='GlobalPermissionsEnabled'])/following-sibling::*[1][name()='true']" -; echo $?
|
||||
#
|
||||
# There's also `sudo /usr/sbin/vsdbutil -c /path` (which is much faster, but is also
|
||||
# deprecated and needs minor parsing).
|
||||
#
|
||||
# If no one finds a problem with doing so, I think the simplest approach
|
||||
# is to just eagerly set this. I found a few imperative approaches:
|
||||
# (diskutil enableOwnership, ~100ms), a cheap one (/usr/sbin/vsdbutil -a, ~40-50ms),
|
||||
# a very cheap one (append the internal format to /var/db/volinfo.database).
|
||||
#
|
||||
# But vsdbutil's deprecation notice suggests using fstab, so I want to
|
||||
# give that a whirl first.
|
||||
#
|
||||
# TODO: when this is workable, poke infinisil about reproducing the issue
|
||||
# and confirming this fix?
|
||||
}
|
||||
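The ex script above appends a single line to /etc/fstab; an illustrative result (the UUID is made up, the mount options are the ones listed above):

# UUID=0A1B2C3D-0000-1111-2222-333344445555 /nix apfs rw,noauto,nobrowse,suid,owners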
|
||||
delete_nix_vol_fstab_line() {
|
||||
# TODO: I'm scaffolding this to handle the new nix volumes
|
||||
# but it might be nice to generalize a smidge further to
|
||||
# go ahead and set up a pattern for curing "old" things
|
||||
# we no longer do?
|
||||
EDITOR="/usr/bin/patch" _sudo "to cut nix from fstab" "$@" < <(/usr/bin/diff /etc/fstab <(/usr/bin/grep -v "$NIX_ROOT apfs rw" /etc/fstab))
|
||||
# leaving some parts out of the grep; people may fiddle this a little?
|
||||
}
|
||||
|
||||
# TODO: hope to remove with --uninstall
|
||||
fstab_uninstall_directions() {
|
||||
substep "Remove ${NIX_ROOT} from /etc/fstab" \
|
||||
" If nix is the only entry: sudo rm /etc/fstab" \
|
||||
" Otherwise, run 'sudo /usr/sbin/vifs' to remove the nix line"
|
||||
}
|
||||
|
||||
fstab_uninstall_prompt() {
|
||||
cat <<EOF
|
||||
During install, I add '${NIX_ROOT}' to /etc/fstab so that macOS knows what
|
||||
mount options to use for the Nix volume.
|
||||
EOF
|
||||
cp /etc/fstab "$SCRATCH/fstab.edit"
|
||||
# technically doesn't need the _sudo path, but throwing away the
|
||||
# output is probably better than mostly-duplicating the code...
|
||||
delete_nix_vol_fstab_line patch "$SCRATCH/fstab.edit" &>/dev/null
|
||||
|
||||
# if the patch test edit, minus comment lines, is equal to empty (:)
|
||||
if /usr/bin/diff -q <(:) <(/usr/bin/grep -v "^#" "$SCRATCH/fstab.edit") &>/dev/null; then
|
||||
# this edit would leave it empty; propose deleting it
|
||||
if confirm_rm "/etc/fstab"; then
|
||||
return 0
|
||||
else
|
||||
echo "Remove nix from /etc/fstab (or remove the file)"
|
||||
fi
|
||||
if ! test_nix; then
|
||||
echo "error: failed to bootstrap /nix; if a reboot doesn't help," >&2
|
||||
suggest_report_error
|
||||
exit 1
|
||||
else
|
||||
echo "I might be able to help you make this edit. Here's the diff:"
|
||||
if ! _diff "/etc/fstab" "$SCRATCH/fstab.edit" && ui_confirm "Does the change above look right?"; then
|
||||
delete_nix_vol_fstab_line /usr/sbin/vifs
|
||||
else
|
||||
echo "Remove nix from /etc/fstab (or remove the file)"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
disk="$(root_disk_identifier)"
|
||||
volume=$(find_nix_volume "$disk")
|
||||
if [ -z "$volume" ]; then
|
||||
echo "Creating a Nix Store volume..." >&2
|
||||
remove_volume() {
|
||||
local volume_special="$1" # (i.e., disk1s7)
|
||||
_sudo "to unmount the Nix volume" \
|
||||
/usr/sbin/diskutil unmount force "$volume_special" || true # might not be mounted
|
||||
_sudo "to delete the Nix volume" \
|
||||
/usr/sbin/diskutil apfs deleteVolume "$volume_special"
|
||||
}
|
||||
|
||||
if test_filevault_in_use; then
|
||||
# TODO: Not sure if it's in-scope now, but `diskutil apfs list`
|
||||
# shows both filevault and encrypted at rest status, and it
|
||||
# may be the more semantic way to test for this? It'll show
|
||||
# `FileVault: No (Encrypted at rest)`
|
||||
# `FileVault: No`
|
||||
# `FileVault: Yes (Unlocked)`
|
||||
# and so on.
|
||||
if test_t2_chip_present; then
|
||||
echo "warning: boot volume is FileVault-encrypted, but the Nix store volume" >&2
|
||||
echo " is only encrypted at rest." >&2
|
||||
echo " See https://nixos.org/nix/manual/#sect-macos-installation" >&2
|
||||
# aspiration: robust enough to both fix problems
|
||||
# *and* update older darwin volumes
|
||||
cure_volume() {
|
||||
local volume_special="$1" # (e.g., disk1s7)
|
||||
local volume_uuid="$2"
|
||||
header "Found existing Nix volume"
|
||||
row " special" "$volume_special"
|
||||
row " uuid" "$volume_uuid"
|
||||
|
||||
if volume_encrypted "$volume_special"; then
|
||||
row "encrypted" "yes"
|
||||
if volume_pass_works "$volume_special" "$volume_uuid"; then
|
||||
NIX_VOLUME_DO_ENCRYPT=0
|
||||
ok "Found a working decryption password in keychain :)"
|
||||
echo ""
|
||||
else
|
||||
# - this is a volume we made, and
|
||||
# - the user encrypted it on their own
|
||||
# - something deleted the credential
|
||||
# - this is an old or BYO volume and the pw
|
||||
# just isn't somewhere we can find it.
|
||||
#
|
||||
# We're going to explain why we're freaking out
|
||||
# and prompt them to either delete the volume
|
||||
# (requiring a sudo auth), or abort to fix
|
||||
warning <<EOF
|
||||
|
||||
This volume is encrypted, but I don't see a password to decrypt it.
|
||||
The quick fix is to let me delete this volume and make you a new one.
|
||||
If that's okay, enter your (sudo) password to continue. If not, you
|
||||
can ensure the decryption password is in your system keychain with a
|
||||
"Where" (service) field set to this volume's UUID:
|
||||
$volume_uuid
|
||||
EOF
|
||||
if password_confirm "delete this volume"; then
|
||||
remove_volume "$volume_special"
|
||||
else
|
||||
echo "error: refusing to create Nix store volume because the boot volume is" >&2
|
||||
echo " FileVault encrypted, but encryption-at-rest is not available." >&2
|
||||
echo " Manually create a volume for the store and re-run this script." >&2
|
||||
echo " See https://nixos.org/nix/manual/#sect-macos-installation" >&2
|
||||
exit 1
|
||||
# TODO: this is a good design case for a warn-and
|
||||
# remind idiom...
|
||||
failure <<EOF
|
||||
Your Nix volume is encrypted, but I couldn't find its password. Either:
|
||||
- Delete or rename the volume out of the way
|
||||
- Ensure its decryption password is in the system keychain with a
|
||||
"Where" (service) field set to this volume's UUID:
|
||||
$volume_uuid
|
||||
EOF
|
||||
fi
|
||||
fi
|
||||
elif test_filevault_in_use; then
|
||||
row "encrypted" "no"
|
||||
warning <<EOF
|
||||
FileVault is on, but your $NIX_VOLUME_LABEL volume isn't encrypted.
|
||||
EOF
|
||||
# if we're interactive, give them a chance to
|
||||
# encrypt the volume. If not, /shrug
|
||||
if ! headless && (( NIX_VOLUME_DO_ENCRYPT == 1 )); then
|
||||
if ui_confirm "Should I encrypt it and add the decryption key to your keychain?"; then
|
||||
encrypt_volume "$volume_uuid" "$NIX_VOLUME_LABEL"
|
||||
NIX_VOLUME_DO_ENCRYPT=0
|
||||
else
|
||||
NIX_VOLUME_DO_ENCRYPT=0
|
||||
reminder "FileVault is on, but your $NIX_VOLUME_LABEL volume isn't encrypted."
|
||||
fi
|
||||
fi
|
||||
|
||||
sudo diskutil apfs addVolume "$disk" APFS 'Nix Store' -mountpoint /nix
|
||||
volume="Nix Store"
|
||||
else
|
||||
echo "Using existing '$volume' volume" >&2
|
||||
fi
|
||||
|
||||
if ! test_fstab; then
|
||||
echo "Configuring /etc/fstab..." >&2
|
||||
label=$(echo "$volume" | sed 's/ /\\040/g')
|
||||
# shellcheck disable=SC2209
|
||||
printf "\$a\nLABEL=%s /nix apfs rw,nobrowse\n.\nwq\n" "$label" | EDITOR=ed sudo vifs
|
||||
row "encrypted" "no"
|
||||
fi
|
||||
}
|
||||
|
||||
main "$@"
|
||||
remove_volume_artifacts() {
|
||||
if test_synthetic_conf_either; then
|
||||
# NIX_ROOT is in synthetic.conf
|
||||
if synthetic_conf_uninstall_prompt; then
|
||||
# TODO: moot until we tackle uninstall, but when we're
|
||||
# actually uninstalling, we should issue:
|
||||
# reminder "macOS will clean up the empty mount-point directory at $NIX_ROOT on reboot."
|
||||
:
|
||||
fi
|
||||
fi
|
||||
if test_fstab; then
|
||||
fstab_uninstall_prompt
|
||||
fi
|
||||
|
||||
if test_nix_volume_mountd_installed; then
|
||||
nix_volume_mountd_uninstall_prompt
|
||||
fi
|
||||
}
|
||||
|
||||
setup_synthetic_conf() {
|
||||
if test_nix_root_is_symlink; then
|
||||
if ! test_synthetic_conf_symlinked; then
|
||||
failure >&2 <<EOF
|
||||
error: $NIX_ROOT is a symlink (-> $(readlink "$NIX_ROOT")).
|
||||
Please remove it. If nix is in /etc/synthetic.conf, remove it and reboot.
|
||||
EOF
|
||||
fi
|
||||
fi
|
||||
if ! test_synthetic_conf_mountable; then
|
||||
task "Configuring /etc/synthetic.conf to make a mount-point at $NIX_ROOT" >&2
|
||||
# technically /etc/synthetic.d/nix is supported in Big Sur+
|
||||
# but handling both takes even more code...
|
||||
_sudo "to add Nix to /etc/synthetic.conf" \
|
||||
/usr/bin/ex /etc/synthetic.conf <<EOF
|
||||
:a
|
||||
${NIX_ROOT:1}
|
||||
.
|
||||
:x
|
||||
EOF
|
||||
if ! test_synthetic_conf_mountable; then
|
||||
failure "error: failed to configure synthetic.conf" >&2
|
||||
fi
|
||||
create_synthetic_objects
|
||||
if ! test_nix; then
|
||||
failure >&2 <<EOF
|
||||
error: failed to bootstrap $NIX_ROOT
|
||||
If you enabled FileVault after booting, this is likely a known issue
|
||||
with macOS that you'll have to reboot to fix. If you didn't enable FV,
|
||||
though, please open an issue describing how the system that you see
|
||||
this error on was set up.
|
||||
EOF
|
||||
fi
|
||||
fi
|
||||
}
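# For reference, create_synthetic_objects (defined elsewhere in this file,
# outside this hunk) amounts to asking apfs.util to realize the synthetic
# entries without a reboot. A sketch of that shape -- the exact flag
# differs by macOS release, so treat this as an approximation:
#
#   create_synthetic_objects() {
#       # Catalina used -B; Big Sur replaced it with -t. Try both and let
#       # whichever the running OS understands do the work.
#       {
#           /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -t || true
#           /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -B || true
#       } &>/dev/null
#   }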
|
||||
|
||||
setup_fstab() {
|
||||
local volume_uuid="$1"
|
||||
# fstab used to be responsible for mounting the volume. Now the last
|
||||
# step adds a LaunchDaemon responsible for mounting. This is technically
|
||||
# redundant for mounting, but diskutil appears to pick up mount options
|
||||
# from fstab (and diskutil's support for specifying them directly is not
|
||||
# consistent across versions/subcommands).
|
||||
if ! test_fstab; then
|
||||
task "Configuring /etc/fstab to specify volume mount options" >&2
|
||||
add_nix_vol_fstab_line "$volume_uuid" /usr/sbin/vifs
|
||||
fi
|
||||
}
|
||||
|
||||
encrypt_volume() {
|
||||
local volume_uuid="$1"
|
||||
local volume_label="$2"
|
||||
local password
|
||||
# Note: mount/unmount are late additions to support the right order
|
||||
# of operations for creating the volume and then baking its uuid into
|
||||
# other artifacts; not as well-trod wrt potential errors, race
|
||||
# conditions, etc.
|
||||
|
||||
/usr/sbin/diskutil mount "$volume_label"
|
||||
|
||||
password="$(/usr/bin/xxd -l 32 -p -c 256 /dev/random)"
|
||||
_sudo "to add your Nix volume's password to Keychain" \
|
||||
/usr/bin/security -i <<EOF
|
||||
add-generic-password -a "$volume_label" -s "$volume_uuid" -l "$volume_label encryption password" -D "Encrypted volume password" -j "Added automatically by the Nix installer for use by $NIX_VOLUME_MOUNTD_DEST" -w "$password" -T /System/Library/CoreServices/APFSUserAgent -T /System/Library/CoreServices/CSUserAgent -T /usr/bin/security "/Library/Keychains/System.keychain"
|
||||
EOF
|
||||
builtin printf "%s" "$password" | _sudo "to encrypt your Nix volume" \
|
||||
/usr/sbin/diskutil apfs encryptVolume "$volume_label" -user disk -stdinpassphrase
|
||||
|
||||
/usr/sbin/diskutil unmount force "$volume_label"
|
||||
}
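# The flip side of the keychain write above: at mount time the passphrase
# can be pulled back out by the volume's UUID (the "Where"/service field
# set here) and fed to unlockVolume. A sketch, not a verbatim copy of the
# LaunchDaemon this installer generates:
#
#   unlock_nix_volume() {
#       local volume_uuid="$1"
#       /usr/bin/security find-generic-password -s "$volume_uuid" -w "/Library/Keychains/System.keychain" \
#           | /usr/sbin/diskutil apfs unlockVolume "$volume_uuid" -stdinpassphrase
#   }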
|
||||
|
||||
create_volume() {
|
||||
# Notes:
|
||||
# 1) using `-nomount` instead of `-mountpoint "$NIX_ROOT"` to get
|
||||
# its UUID and set mount opts in fstab before first mount
|
||||
#
|
||||
# 2) the system keychain is in some sense less secure than the user keychain... (it's
|
||||
# possible to read the password for decrypting the keychain) but
|
||||
# the user keychain appears to be available too late. As far as I
|
||||
# can tell, the file with this password (/var/db/SystemKey) is
|
||||
# inside the FileVault envelope. If that isn't true, it may make
|
||||
# sense to store the password inside the envelope?
|
||||
#
|
||||
# 3) At some point it would be ideal to have a small binary to serve
|
||||
# as the daemon itself, and for it to replace /usr/bin/security here.
|
||||
#
|
||||
# 4) *UserAgent exemptions should let the system seamlessly supply the
|
||||
# password if noauto is removed from the fstab entry. This is intentional;
|
||||
# the user will hopefully look for help if the volume stops mounting,
|
||||
# rather than failing over into subtle race-condition problems.
|
||||
#
|
||||
# 5) If we ever get users griping about not having space to do
|
||||
# anything useful with Nix, it is possible to specify
|
||||
# `-reserve 10g` or something, which will fail w/o that much
|
||||
#
|
||||
# 6) getting the special device w/ awk may be fragile, but we do it to:
|
||||
# - save time over running slow diskutil commands
|
||||
# - skirt risk we grab wrong volume if multiple match
|
||||
_sudo "to create a new APFS volume '$NIX_VOLUME_LABEL' on $NIX_VOLUME_USE_DISK" \
|
||||
/usr/sbin/diskutil apfs addVolume "$NIX_VOLUME_USE_DISK" "$NIX_VOLUME_FS" "$NIX_VOLUME_LABEL" -nomount | /usr/bin/awk '/Created new APFS Volume/ {print $5}'
|
||||
}
|
||||
|
||||
volume_uuid_from_special() {
|
||||
local volume_special="$1" # (e.g., disk1s7)
|
||||
# For reasons I won't pretend to fathom, this returns 253 when it works
|
||||
/System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -k "$volume_special" || true
|
||||
}
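# A slower but less cryptic alternative, should apfs.util's odd exit code
# ever become a problem (sketch; the XPath mirrors the plist queries used
# elsewhere in this script):
#
#   volume_uuid_from_special_slow() {
#       /usr/sbin/diskutil info -plist "$1" \
#           | xmllint --xpath "(/plist/dict/key[text()='VolumeUUID'])/following-sibling::*[1]/text()" -
#   }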
|
||||
|
||||
# this sometimes clears immediately, and AFAIK clears
|
||||
# within about 1s. diskutil info on an unmounted path
|
||||
# fails in around 50-100ms and a match takes about
|
||||
# 250-300ms. I suspect it's usually ~250-750ms
|
||||
await_volume() {
|
||||
# caution: this could, in theory, get stuck
|
||||
until /usr/sbin/diskutil info "$NIX_ROOT" &>/dev/null; do
|
||||
:
|
||||
done
|
||||
}
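# If the "could, in theory, get stuck" caveat ever bites, a bounded variant
# is straightforward (sketch; the 60 x 0.5s budget is an arbitrary choice):
#
#   await_volume_bounded() {
#       local tries=60
#       until /usr/sbin/diskutil info "$NIX_ROOT" &>/dev/null; do
#           (( tries-- > 0 )) || return 1
#           /bin/sleep 0.5
#       done
#   }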
|
||||
|
||||
setup_volume() {
|
||||
local use_special use_uuid profile_packages
|
||||
task "Creating a Nix volume" >&2
|
||||
|
||||
use_special="${NIX_VOLUME_USE_SPECIAL:-$(create_volume)}"
|
||||
|
||||
_sudo "to ensure the Nix volume is not mounted" \
|
||||
/usr/sbin/diskutil unmount force "$use_special" || true # might not be mounted
|
||||
|
||||
use_uuid=${NIX_VOLUME_USE_UUID:-$(volume_uuid_from_special "$use_special")}
|
||||
|
||||
setup_fstab "$use_uuid"
|
||||
|
||||
if should_encrypt_volume; then
|
||||
encrypt_volume "$use_uuid" "$NIX_VOLUME_LABEL"
|
||||
setup_volume_daemon "encrypted" "$use_uuid"
|
||||
# TODO: might be able to save ~60ms by caching or setting
|
||||
# this somewhere rather than re-checking here.
|
||||
elif volume_encrypted "$use_special"; then
|
||||
setup_volume_daemon "encrypted" "$use_uuid"
|
||||
else
|
||||
setup_volume_daemon "unencrypted" "$use_uuid"
|
||||
fi
|
||||
|
||||
await_volume
|
||||
|
||||
if [ "$(/usr/sbin/diskutil info -plist "$NIX_ROOT" | xmllint --xpath "(/plist/dict/key[text()='GlobalPermissionsEnabled'])/following-sibling::*[1]" -)" = "<false/>" ]; then
|
||||
_sudo "to set enableOwnership (enabling users to own files)" \
|
||||
/usr/sbin/diskutil enableOwnership "$NIX_ROOT"
|
||||
fi
|
||||
|
||||
# TODO: below is a vague kludge for now; I just don't know
|
||||
# what, if any, safe action there is to take here. Also, the
|
||||
# reminder isn't very helpful.
|
||||
# I'm less sure where this belongs, but it also wants the volume mounted, pre-install
|
||||
if type -p nix-env; then
|
||||
profile_packages="$(nix-env --query --installed)"
|
||||
# TODO: can probably do below faster w/ read
|
||||
# intentionally unquoted string to eat whitespace in wc output
|
||||
# shellcheck disable=SC2046,SC2059
|
||||
if ! [ $(printf "$profile_packages" | /usr/bin/wc -l) = "0" ]; then
|
||||
reminder <<EOF
|
||||
Nix now supports only multi-user installs on Darwin/macOS, and your user's
|
||||
Nix profile has some packages in it. These packages may obscure those in the
|
||||
default profile, including the Nix this installer will add. You should
|
||||
review these packages:
|
||||
$profile_packages
|
||||
EOF
|
||||
fi
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
setup_volume_daemon() {
|
||||
local cmd_type="$1" # encrypted|unencrypted
|
||||
local volume_uuid="$2"
|
||||
if ! test_voldaemon; then
|
||||
task "Configuring LaunchDaemon to mount '$NIX_VOLUME_LABEL'" >&2
|
||||
_sudo "to install the Nix volume mounter" /usr/bin/ex "$NIX_VOLUME_MOUNTD_DEST" <<EOF
|
||||
:a
|
||||
$(generate_mount_daemon "$cmd_type" "$volume_uuid")
|
||||
.
|
||||
:x
|
||||
EOF
|
||||
|
||||
# TODO: should probably alert the user if this is disabled?
|
||||
_sudo "to launch the Nix volume mounter" \
|
||||
launchctl bootstrap system "$NIX_VOLUME_MOUNTD_DEST" || true
|
||||
# TODO: confirm whether kickstart is necessary?
|
||||
# I feel a little superstitious, but it can guard
|
||||
# against multiple problems (doesn't start, old
|
||||
# version still running for some reason...)
|
||||
_sudo "to launch the Nix volume mounter" \
|
||||
launchctl kickstart -k system/org.nixos.darwin-store
|
||||
fi
|
||||
}
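# Not something the installer runs, but a quick way to check on the daemon
# afterwards on recent macOS (label matches the plist installed above):
#
#   launchctl print system/org.nixos.darwin-store | grep -E 'state|path'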
|
||||
|
||||
setup_darwin_volume() {
|
||||
setup_synthetic_conf
|
||||
setup_volume
|
||||
}
|
||||
|
||||
if [ "$_CREATE_VOLUME_NO_MAIN" = 1 ]; then
|
||||
if [ -n "$*" ]; then
|
||||
"$@" # expose functions in case we want multiple routines?
|
||||
fi
|
||||
else
|
||||
# no reason to pay for bash to process this
|
||||
main() {
|
||||
{
|
||||
echo ""
|
||||
echo " ------------------------------------------------------------------ "
|
||||
echo " | This installer will create a volume for the nix store and |"
|
||||
echo " | configure it to mount at $NIX_ROOT. Follow these steps to uninstall. |"
|
||||
echo " ------------------------------------------------------------------ "
|
||||
echo ""
|
||||
echo " 1. Remove the entry from fstab using 'sudo /usr/sbin/vifs'"
|
||||
echo " 2. Run 'sudo launchctl bootout system/org.nixos.darwin-store'"
|
||||
echo " 3. Remove $NIX_VOLUME_MOUNTD_DEST"
|
||||
echo " 4. Destroy the data volume using '/usr/sbin/diskutil apfs deleteVolume'"
|
||||
echo " 5. Remove the 'nix' line from /etc/synthetic.conf (or the file)"
|
||||
echo ""
|
||||
} >&2
|
||||
|
||||
setup_darwin_volume
|
||||
}
|
||||
|
||||
main "$@"
|
||||
fi
|
||||
|
|
|
@ -3,59 +3,110 @@
|
|||
set -eu
|
||||
set -o pipefail
|
||||
|
||||
readonly PLIST_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist
|
||||
readonly NIX_DAEMON_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist
|
||||
# create by default; set 0 to DIY, use a symlink, etc.
|
||||
readonly NIX_VOLUME_CREATE=${NIX_VOLUME_CREATE:-1} # now default
|
||||
NIX_FIRST_BUILD_UID="301"
|
||||
NIX_BUILD_USER_NAME_TEMPLATE="_nixbld%d"
|
||||
|
||||
dsclattr() {
|
||||
/usr/bin/dscl . -read "$1" \
|
||||
| awk "/$2/ { print \$2 }"
|
||||
# caution: may update times on / if not run as normal non-root user
|
||||
read_only_root() {
|
||||
# this touch command ~should~ always produce an error
|
||||
# as of this change I confirmed /usr/bin/touch emits:
|
||||
# "touch: /: Operation not permitted" Monterey
|
||||
# "touch: /: Read-only file system" Catalina+ and Big Sur
|
||||
# "touch: /: Permission denied" Mojave
|
||||
# (not matching prefix for compat w/ coreutils touch in case using
|
||||
# an explicit path causes problems; its prefix differs)
|
||||
case "$(/usr/bin/touch / 2>&1)" in
|
||||
*"Read-only file system") # Catalina, Big Sur
|
||||
return 0
|
||||
;;
|
||||
*"Operation not permitted") # Monterey
|
||||
return 0
|
||||
;;
|
||||
*)
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
|
||||
# Avoiding the slow semantic way to get this information (~330ms vs ~8ms)
|
||||
# unless using touch causes problems. Just in case, that approach is:
|
||||
# diskutil info -plist / | <find the Writable or WritableVolume keys>, i.e.
|
||||
# diskutil info -plist / | xmllint --xpath "name(/plist/dict/key[text()='Writable']/following-sibling::*[1])" -
|
||||
}
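# The "slow semantic way" mentioned above, wrapped up for reference; it
# mirrors the Writable check the top-level install script already does,
# at the cost of ~300ms of diskutil + xmllint instead of touching /:
#
#   read_only_root_slow() {
#       [ "$(/usr/sbin/diskutil info -plist / \
#           | xmllint --xpath "name(/plist/dict/key[text()='Writable']/following-sibling::*[1])" -)" = "false" ]
#   }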
|
||||
|
||||
poly_validate_assumptions() {
|
||||
if [ "$(uname -s)" != "Darwin" ]; then
|
||||
failure "This script is for use with macOS!"
|
||||
if read_only_root && [ "$NIX_VOLUME_CREATE" = 1 ]; then
|
||||
should_create_volume() { return 0; }
|
||||
else
|
||||
should_create_volume() { return 1; }
|
||||
fi
|
||||
|
||||
# shellcheck source=./create-darwin-volume.sh
|
||||
. "$EXTRACTED_NIX_PATH/create-darwin-volume.sh" "no-main"
|
||||
|
||||
dsclattr() {
|
||||
/usr/bin/dscl . -read "$1" \
|
||||
| /usr/bin/awk "/$2/ { print \$2 }"
|
||||
}
|
||||
|
||||
test_nix_daemon_installed() {
|
||||
test -e "$NIX_DAEMON_DEST"
|
||||
}
|
||||
|
||||
poly_cure_artifacts() {
|
||||
if should_create_volume; then
|
||||
task "Fixing any leftover Nix volume state"
|
||||
cat <<EOF
|
||||
Before I try to install, I'll check for any existing Nix volume config
|
||||
and ask for your permission to remove it (so that the installer can
|
||||
start fresh). I'll also ask for permission to fix any issues I spot.
|
||||
EOF
|
||||
cure_volumes
|
||||
remove_volume_artifacts
|
||||
fi
|
||||
}
|
||||
|
||||
poly_service_installed_check() {
|
||||
[ -e "$PLIST_DEST" ]
|
||||
if should_create_volume; then
|
||||
test_nix_daemon_installed || test_nix_volume_mountd_installed
|
||||
else
|
||||
test_nix_daemon_installed
|
||||
fi
|
||||
}
|
||||
|
||||
poly_service_uninstall_directions() {
|
||||
cat <<EOF
|
||||
$1. Delete $PLIST_DEST
|
||||
|
||||
sudo launchctl unload $PLIST_DEST
|
||||
sudo rm $PLIST_DEST
|
||||
|
||||
EOF
|
||||
echo "$1. Remove macOS-specific components:"
|
||||
if should_create_volume && test_nix_volume_mountd_installed; then
|
||||
nix_volume_mountd_uninstall_directions
|
||||
fi
|
||||
if test_nix_daemon_installed; then
|
||||
nix_daemon_uninstall_directions
|
||||
fi
|
||||
}
|
||||
|
||||
poly_service_setup_note() {
|
||||
cat <<EOF
|
||||
- load and start a LaunchDaemon (at $PLIST_DEST) for nix-daemon
|
||||
|
||||
EOF
|
||||
if should_create_volume; then
|
||||
echo " - create a Nix volume and a LaunchDaemon to mount it"
|
||||
fi
|
||||
echo " - create a LaunchDaemon (at $NIX_DAEMON_DEST) for nix-daemon"
|
||||
echo ""
|
||||
}
|
||||
|
||||
poly_extra_try_me_commands(){
|
||||
:
|
||||
}
|
||||
poly_extra_setup_instructions(){
|
||||
:
|
||||
poly_extra_try_me_commands() {
|
||||
:
|
||||
}
|
||||
|
||||
poly_configure_nix_daemon_service() {
|
||||
task "Setting up the nix-daemon LaunchDaemon"
|
||||
_sudo "to set up the nix-daemon as a LaunchDaemon" \
|
||||
cp -f "/nix/var/nix/profiles/default$PLIST_DEST" "$PLIST_DEST"
|
||||
/bin/cp -f "/nix/var/nix/profiles/default$NIX_DAEMON_DEST" "$NIX_DAEMON_DEST"
|
||||
|
||||
_sudo "to load the LaunchDaemon plist for nix-daemon" \
|
||||
launchctl load /Library/LaunchDaemons/org.nixos.nix-daemon.plist
|
||||
|
||||
_sudo "to start the nix-daemon" \
|
||||
launchctl start org.nixos.nix-daemon
|
||||
|
||||
launchctl kickstart -k system/org.nixos.nix-daemon
|
||||
}
|
||||
|
||||
poly_group_exists() {
|
||||
|
@ -96,6 +147,8 @@ poly_user_home_get() {
|
|||
}
|
||||
|
||||
poly_user_home_set() {
|
||||
# This can trigger a permission prompt now:
|
||||
# "Terminal" would like to administer your computer. Administration can include modifying passwords, networking, and system settings.
|
||||
_sudo "in order to give $1 a safe home directory" \
|
||||
/usr/bin/dscl . -create "/Users/$1" "NFSHomeDirectory" "$2"
|
||||
}
|
||||
|
@ -121,7 +174,7 @@ poly_user_shell_set() {
|
|||
poly_user_in_group_check() {
|
||||
username=$1
|
||||
group=$2
|
||||
dseditgroup -o checkmember -m "$username" "$group" > /dev/null 2>&1
|
||||
/usr/sbin/dseditgroup -o checkmember -m "$username" "$group" > /dev/null 2>&1
|
||||
}
|
||||
|
||||
poly_user_in_group_set() {
|
||||
|
@ -151,3 +204,21 @@ poly_create_build_user() {
|
|||
/usr/bin/dscl . create "/Users/$username" \
|
||||
UniqueID "${uid}"
|
||||
}
|
||||
|
||||
poly_prepare_to_install() {
|
||||
if should_create_volume; then
|
||||
header "Preparing a Nix volume"
|
||||
# intentional indent below to match task indent
|
||||
cat <<EOF
|
||||
Nix traditionally stores its data in the root directory $NIX_ROOT, but
|
||||
macOS now (starting in 10.15 Catalina) has a read-only root directory.
|
||||
To support Nix, I will create a volume and configure macOS to mount it
|
||||
at $NIX_ROOT.
|
||||
EOF
|
||||
setup_darwin_volume
|
||||
fi
|
||||
|
||||
if [ "$(diskutil info -plist /nix | xmllint --xpath "(/plist/dict/key[text()='GlobalPermissionsEnabled'])/following-sibling::*[1]" -)" = "<false/>" ]; then
|
||||
failure "This script needs a /nix volume with global permissions! This may require running sudo diskutil enableOwnership /nix."
|
||||
fi
|
||||
}
|
||||
|
|
|
@ -33,7 +33,7 @@ NIX_BUILD_USER_NAME_TEMPLATE="nixbld%d"
|
|||
readonly NIX_ROOT="/nix"
|
||||
readonly NIX_EXTRA_CONF=${NIX_EXTRA_CONF:-}
|
||||
|
||||
readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshenv" "/etc/bash.bashrc" "/etc/zsh/zshenv")
|
||||
readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshrc" "/etc/bash.bashrc" "/etc/zsh/zshrc")
|
||||
readonly PROFILE_BACKUP_SUFFIX=".backup-before-nix"
|
||||
readonly PROFILE_NIX_FILE="$NIX_ROOT/var/nix/profiles/default/etc/profile.d/nix-daemon.sh"
|
||||
|
||||
|
@ -43,7 +43,7 @@ readonly NIX_INSTALLED_CACERT="@cacert@"
|
|||
#readonly NIX_INSTALLED_CACERT="/nix/store/7dxhzymvy330i28ii676fl1pqwcahv2f-nss-cacert-3.49.2"
|
||||
readonly EXTRACTED_NIX_PATH="$(dirname "$0")"
|
||||
|
||||
readonly ROOT_HOME=$(echo ~root)
|
||||
readonly ROOT_HOME=~root
|
||||
|
||||
if [ -t 0 ]; then
|
||||
readonly IS_HEADLESS='no'
|
||||
|
@ -59,14 +59,19 @@ headless() {
|
|||
fi
|
||||
}
|
||||
|
||||
contactme() {
|
||||
contact_us() {
|
||||
echo "You can open an issue at https://github.com/nixos/nix/issues"
|
||||
echo ""
|
||||
echo "Or feel free to contact the team:"
|
||||
echo " - Matrix: #nix:nixos.org"
|
||||
echo " - IRC: in #nixos on irc.libera.chat"
|
||||
echo " - twitter: @nixos_org"
|
||||
echo " - forum: https://discourse.nixos.org"
|
||||
}
|
||||
get_help() {
|
||||
echo "We'd love to help if you need it."
|
||||
echo ""
|
||||
echo "If you can, open an issue at https://github.com/nixos/nix/issues"
|
||||
echo ""
|
||||
echo "Or feel free to contact the team,"
|
||||
echo " - on IRC #nixos on irc.freenode.net"
|
||||
echo " - on twitter @nixos_org"
|
||||
contact_us
|
||||
}
|
||||
|
||||
uninstall_directions() {
|
||||
|
@ -102,7 +107,6 @@ $step. Delete the files Nix added to your system:
|
|||
and that is it.
|
||||
|
||||
EOF
|
||||
|
||||
}
|
||||
|
||||
nix_user_for_core() {
|
||||
|
@ -170,7 +174,7 @@ failure() {
|
|||
header "oh no!"
|
||||
_textout "$RED" "$@"
|
||||
echo ""
|
||||
_textout "$RED" "$(contactme)"
|
||||
_textout "$RED" "$(get_help)"
|
||||
trap finish_cleanup EXIT
|
||||
exit 1
|
||||
}
|
||||
|
@ -201,6 +205,95 @@ ui_confirm() {
|
|||
return 1
|
||||
}
|
||||
|
||||
printf -v _UNCHANGED_GRP_FMT "%b" $'\033[2m%='"$ESC" # "dim"
|
||||
# bold+invert+red and bold+invert+green just for the +/- below
|
||||
# red/green foreground for rest of the line
|
||||
printf -v _OLD_LINE_FMT "%b" $'\033[1;7;31m-'"$ESC ${RED}%L${ESC}"
|
||||
printf -v _NEW_LINE_FMT "%b" $'\033[1;7;32m+'"$ESC ${GREEN}%L${ESC}"
|
||||
|
||||
_diff() {
|
||||
# simple colorized diff compatible w/ pre-`--color` versions
|
||||
diff --unchanged-group-format="$_UNCHANGED_GRP_FMT" --old-line-format="$_OLD_LINE_FMT" --new-line-format="$_NEW_LINE_FMT" --unchanged-line-format=" %L" "$@"
|
||||
}
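# Usage is the same as plain diff, e.g. (illustrative):
#   _diff /etc/fstab "$SCRATCH/fstab.edit"
# The --*-line-format/--unchanged-group-format options come from GNU
# diffutils; this relies on the diff on PATH understanding them, which has
# historically been the case on macOS.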
|
||||
|
||||
confirm_rm() {
|
||||
local path="$1"
|
||||
if ui_confirm "Can I remove $path?"; then
|
||||
_sudo "to remove $path" rm "$path"
|
||||
fi
|
||||
}
|
||||
|
||||
confirm_edit() {
|
||||
local path="$1"
|
||||
local edit_path="$2"
|
||||
cat <<EOF
|
||||
|
||||
Nix isn't the only thing in $path,
|
||||
but I think I know how to edit it out.
|
||||
Here's the diff:
|
||||
EOF
|
||||
|
||||
# could technically test the diff, but caller should do it
|
||||
_diff "$path" "$edit_path"
|
||||
if ui_confirm "Does the change above look right?"; then
|
||||
_sudo "remove nix from $path" cp "$edit_path" "$path"
|
||||
fi
|
||||
}
|
||||
|
||||
_SERIOUS_BUSINESS="${RED}%s:${ESC} "
|
||||
password_confirm() {
|
||||
local do_something_consequential="$1"
|
||||
if ui_confirm "Can I $do_something_consequential?"; then
|
||||
# shellcheck disable=SC2059
|
||||
sudo -kv --prompt="$(printf "${_SERIOUS_BUSINESS}" "Enter your password to $do_something_consequential")"
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Support accumulating reminders over the course of a run and showing
|
||||
# them at the end. An example where this helps: the installer changes
|
||||
# something, but it won't work without a reboot. If you tell the user
|
||||
# when you do it, they may miss it in the stream. The value of the
|
||||
# setting isn't enough to decide whether to message because you only
|
||||
# need to message if you *changed* it.
|
||||
|
||||
# reminders stored in array delimited by empty entry; if ! headless,
|
||||
# user is asked to confirm after each delimiter.
|
||||
_reminders=()
|
||||
((_remind_num=1))
|
||||
|
||||
remind() {
|
||||
# (( arithmetic expression ))
|
||||
if (( _remind_num > 1 )); then
|
||||
header "Reminders"
|
||||
for line in "${_reminders[@]}"; do
|
||||
echo "$line"
|
||||
if ! headless && [ "${#line}" = 0 ]; then
|
||||
if read -r -p "Press enter/return to acknowledge."; then
|
||||
printf $'\033[A\33[2K\r'
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
}
|
||||
|
||||
reminder() {
|
||||
printf -v label "${BLUE}[ %d ]${ESC}" "$_remind_num"
|
||||
_reminders+=("$label")
|
||||
if [[ "$*" = "" ]]; then
|
||||
while read -r line; do
|
||||
_reminders+=("$line")
|
||||
done
|
||||
else
|
||||
# this expands each arg to an array entry (and each entry will
|
||||
# ultimately be a separate line in the output)
|
||||
_reminders+=("$@")
|
||||
fi
|
||||
_reminders+=("")
|
||||
((_remind_num++))
|
||||
}
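# Both calling styles reminder() supports, for reference (these examples
# are taken from later in this patch):
#
#   reminder "Nix won't work in active shell sessions until you restart them."
#
#   reminder <<EOF
#   I had trouble fetching the nixpkgs channel (are you offline?)
#   To try again later, run: sudo -i nix-channel --update nixpkgs
#   EOF
#
# Each call becomes a numbered block that remind() replays at the end of
# the run.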
|
||||
|
||||
__sudo() {
|
||||
local expl="$1"
|
||||
local cmd="$2"
|
||||
|
@ -221,18 +314,18 @@ _sudo() {
|
|||
local expl="$1"
|
||||
shift
|
||||
if ! headless; then
|
||||
__sudo "$expl" "$*"
|
||||
__sudo "$expl" "$*" >&2
|
||||
fi
|
||||
sudo "$@"
|
||||
}
|
||||
|
||||
|
||||
readonly SCRATCH=$(mktemp -d -t tmp.XXXXXXXXXX)
|
||||
function finish_cleanup {
|
||||
readonly SCRATCH=$(mktemp -d "${TMPDIR:-/tmp/}tmp.XXXXXXXXXX")
|
||||
finish_cleanup() {
|
||||
rm -rf "$SCRATCH"
|
||||
}
|
||||
|
||||
function finish_fail {
|
||||
finish_fail() {
|
||||
finish_cleanup
|
||||
|
||||
failure <<EOF
|
||||
|
@ -244,45 +337,46 @@ EOF
|
|||
}
|
||||
trap finish_fail EXIT
|
||||
|
||||
channel_update_failed=0
|
||||
function finish_success {
|
||||
finish_cleanup
|
||||
|
||||
finish_success() {
|
||||
ok "Alright! We're done!"
|
||||
if [ "x$channel_update_failed" = x1 ]; then
|
||||
echo ""
|
||||
echo "But fetching the nixpkgs channel failed. (Are you offline?)"
|
||||
echo "To try again later, run \"sudo -i nix-channel --update nixpkgs\"."
|
||||
fi
|
||||
|
||||
cat <<EOF
|
||||
|
||||
Before Nix will work in your existing shells, you'll need to close
|
||||
them and open them again. Other than that, you should be ready to go.
|
||||
|
||||
Try it! Open a new terminal, and type:
|
||||
$(poly_extra_try_me_commands)
|
||||
$ nix-shell -p nix-info --run "nix-info -m"
|
||||
$(poly_extra_setup_instructions)
|
||||
Thank you for using this installer. If you have any feedback, don't
|
||||
hesitate:
|
||||
|
||||
$(contactme)
|
||||
Thank you for using this installer. If you have any feedback or need
|
||||
help, don't hesitate:
|
||||
|
||||
$(contact_us)
|
||||
EOF
|
||||
|
||||
remind
|
||||
finish_cleanup
|
||||
}
|
||||
|
||||
finish_uninstall_success() {
|
||||
ok "Alright! Nix should be removed!"
|
||||
|
||||
cat <<EOF
|
||||
If you spot anything this uninstaller missed or have feedback,
|
||||
don't hesitate:
|
||||
|
||||
$(contact_us)
|
||||
EOF
|
||||
remind
|
||||
finish_cleanup
|
||||
}
|
||||
|
||||
remove_nix_artifacts() {
|
||||
failure "Not implemented yet"
|
||||
}
|
||||
|
||||
cure_artifacts() {
|
||||
poly_cure_artifacts
|
||||
# remove_nix_artifacts (LATER)
|
||||
}
|
||||
|
||||
validate_starting_assumptions() {
|
||||
poly_validate_assumptions
|
||||
|
||||
if [ $EUID -eq 0 ]; then
|
||||
failure <<EOF
|
||||
Please do not run this script with root privileges. We will call sudo
|
||||
when we need to.
|
||||
EOF
|
||||
fi
|
||||
|
||||
if type nix-env 2> /dev/null >&2; then
|
||||
warning <<EOF
|
||||
Nix already appears to be installed. This installer may run into issues.
|
||||
|
@ -293,19 +387,28 @@ EOF
|
|||
fi
|
||||
|
||||
for profile_target in "${PROFILE_TARGETS[@]}"; do
|
||||
# TODO: I think it would be good to accumulate a list of all
|
||||
# of the copies so that people don't hit this 2 or 3x in
|
||||
# a row for different files.
|
||||
if [ -e "$profile_target$PROFILE_BACKUP_SUFFIX" ]; then
|
||||
# this backup process first released in Nix 2.1
|
||||
failure <<EOF
|
||||
When this script runs, it backs up the current $profile_target to
|
||||
$profile_target$PROFILE_BACKUP_SUFFIX. This backup file already exists, though.
|
||||
I back up shell profile/rc scripts before I add Nix to them.
|
||||
I need to back up $profile_target to $profile_target$PROFILE_BACKUP_SUFFIX,
|
||||
but the latter already exists.
|
||||
|
||||
Please follow these instructions to clean up the old backup file:
|
||||
Here's how to clean up the old backup file:
|
||||
|
||||
1. Copy $profile_target and $profile_target$PROFILE_BACKUP_SUFFIX to another place, just
|
||||
in case.
|
||||
1. Back up (copy) $profile_target and $profile_target$PROFILE_BACKUP_SUFFIX
|
||||
to another location, just in case.
|
||||
|
||||
2. Take care to make sure that $profile_target$PROFILE_BACKUP_SUFFIX doesn't look like
|
||||
it has anything nix-related in it. If it does, something is probably
|
||||
quite wrong. Please open an issue or get in touch immediately.
|
||||
2. Ensure $profile_target$PROFILE_BACKUP_SUFFIX does not have anything
|
||||
Nix-related in it. If it does, something is probably quite
|
||||
wrong. Please open an issue or get in touch immediately.
|
||||
|
||||
3. Once you confirm $profile_target is backed up and
|
||||
$profile_target$PROFILE_BACKUP_SUFFIX doesn't mention Nix, run:
|
||||
mv $profile_target$PROFILE_BACKUP_SUFFIX $profile_target
|
||||
EOF
|
||||
fi
|
||||
done
|
||||
|
@ -444,18 +547,46 @@ create_build_users() {
|
|||
|
||||
create_directories() {
|
||||
# FIXME: remove all of this because it duplicates LocalStore::LocalStore().
|
||||
task "Setting up the basic directory structure"
|
||||
if [ -d "$NIX_ROOT" ]; then
|
||||
# if /nix already exists, take ownership
|
||||
#
|
||||
# Caution: notes below are macOS-y
|
||||
# This is a bit of a goldilocks zone for taking ownership
|
||||
# if there are already files on the volume; the volume is
|
||||
# now mounted, but we haven't added a bunch of new files
|
||||
|
||||
# this is probably a bit slow; I've been seeing 3.3-4s even
|
||||
# when promptly installed over a fresh single-user install.
|
||||
# (Mentioning in case anyone's aware of a shortcut.)
|
||||
# `|| true`: .Trashes errors w/o full disk perm
|
||||
|
||||
# rumor per #4488 that macOS 11.2 may not have
|
||||
# sbin on path, and that's where chown is, but
|
||||
# since this bit is cross-platform:
|
||||
# - first try with `command -vp` to try and find
|
||||
# chown in the usual places
|
||||
# - fall back on `command -v` which would find
|
||||
# any chown on path
|
||||
# if we don't find one, the command is already
|
||||
# hiding behind || true, and the general state
|
||||
# should be one the user can repair once they
|
||||
# figure out where chown is...
|
||||
local get_chr_own="$(command -vp chown)"
|
||||
if [[ -z "$get_chr_own" ]]; then
|
||||
get_chr_own="$(command -v chown)"
|
||||
fi
|
||||
_sudo "to take root ownership of existing Nix store files" \
|
||||
"$get_chr_own" -R "root:$NIX_BUILD_GROUP_NAME" "$NIX_ROOT" || true
|
||||
fi
|
||||
_sudo "to make the basic directory structure of Nix (part 1)" \
|
||||
mkdir -pv -m 0755 /nix /nix/var /nix/var/log /nix/var/log/nix /nix/var/log/nix/drvs /nix/var/nix{,/db,/gcroots,/profiles,/temproots,/userpool} /nix/var/nix/{gcroots,profiles}/per-user
|
||||
install -dv -m 0755 /nix /nix/var /nix/var/log /nix/var/log/nix /nix/var/log/nix/drvs /nix/var/nix{,/db,/gcroots,/profiles,/temproots,/userpool} /nix/var/nix/{gcroots,profiles}/per-user
|
||||
|
||||
_sudo "to make the basic directory structure of Nix (part 2)" \
|
||||
mkdir -pv -m 1775 /nix/store
|
||||
|
||||
_sudo "to make the basic directory structure of Nix (part 3)" \
|
||||
chgrp "$NIX_BUILD_GROUP_NAME" /nix/store
|
||||
install -dv -g "$NIX_BUILD_GROUP_NAME" -m 1775 /nix/store
|
||||
|
||||
_sudo "to place the default nix daemon configuration (part 1)" \
|
||||
mkdir -pv -m 0555 /etc/nix
|
||||
install -dv -m 0555 /etc/nix
|
||||
}
|
||||
|
||||
place_channel_configuration() {
|
||||
|
@ -475,9 +606,9 @@ This installation tool will set up your computer with the Nix package
|
|||
manager. This will happen in a few stages:
|
||||
|
||||
1. Make sure your computer doesn't already have Nix. If it does, I
|
||||
will show you instructions on how to clean up your old one.
|
||||
will show you instructions on how to clean up your old install.
|
||||
|
||||
2. Show you what we are going to install and where. Then we will ask
|
||||
2. Show you what I am going to install and where. Then I will ask
|
||||
if you are ready to continue.
|
||||
|
||||
3. Create the system users and groups that the Nix daemon uses to run
|
||||
|
@ -492,14 +623,14 @@ manager. This will happen in a few stages:
|
|||
|
||||
EOF
|
||||
|
||||
if ui_confirm "Would you like to see a more detailed list of what we will do?"; then
|
||||
if ui_confirm "Would you like to see a more detailed list of what I will do?"; then
|
||||
cat <<EOF
|
||||
|
||||
We will:
|
||||
I will:
|
||||
|
||||
- make sure your computer doesn't already have Nix files
|
||||
(if it does, I will tell you how to clean them up.)
|
||||
- create local users (see the list above for the users we'll make)
|
||||
- create local users (see the list above for the users I'll make)
|
||||
- create a local group ($NIX_BUILD_GROUP_NAME)
|
||||
- install Nix in to $NIX_ROOT
|
||||
- create a configuration file in /etc/nix
|
||||
|
@ -534,7 +665,7 @@ run in a headless fashion, like this:
|
|||
|
||||
$ curl -L https://nixos.org/nix/install | sh
|
||||
|
||||
or maybe in a CI pipeline. Because of that, we're going to skip the
|
||||
or maybe in a CI pipeline. Because of that, I'm going to skip the
|
||||
verbose output in the interest of brevity.
|
||||
|
||||
If you would like to
|
||||
|
@ -548,7 +679,7 @@ EOF
|
|||
fi
|
||||
|
||||
cat <<EOF
|
||||
This script is going to call sudo a lot. Every time we do, it'll
|
||||
This script is going to call sudo a lot. Every time I do, it'll
|
||||
output exactly what it'll do, and why.
|
||||
|
||||
Just like this:
|
||||
|
@ -560,25 +691,29 @@ EOF
|
|||
cat <<EOF
|
||||
|
||||
This might look scary, but everything can be undone by running just a
|
||||
few commands. We used to ask you to confirm each time sudo ran, but it
|
||||
few commands. I used to ask you to confirm each time sudo ran, but it
|
||||
was too many times. Instead, I'll just ask you this one time:
|
||||
|
||||
EOF
|
||||
if ui_confirm "Can we use sudo?"; then
|
||||
if ui_confirm "Can I use sudo?"; then
|
||||
ok "Yay! Thanks! Let's get going!"
|
||||
else
|
||||
failure <<EOF
|
||||
That is okay, but we can't install.
|
||||
That is okay, but I can't install.
|
||||
EOF
|
||||
fi
|
||||
}
|
||||
|
||||
install_from_extracted_nix() {
|
||||
task "Installing Nix"
|
||||
(
|
||||
cd "$EXTRACTED_NIX_PATH"
|
||||
|
||||
_sudo "to copy the basic Nix files to the new store at $NIX_ROOT/store" \
|
||||
rsync -rlpt --chmod=-w ./store/* "$NIX_ROOT/store/"
|
||||
cp -RLp ./store/* "$NIX_ROOT/store/"
|
||||
|
||||
_sudo "to make the new store non-writable at $NIX_ROOT/store" \
|
||||
chmod -R ugo-w "$NIX_ROOT/store/"
|
||||
|
||||
if [ -d "$NIX_INSTALLED_NIX" ]; then
|
||||
echo " Alright! We have our first nix at $NIX_INSTALLED_NIX"
|
||||
|
@ -589,9 +724,8 @@ $NIX_INSTALLED_NIX.
|
|||
EOF
|
||||
fi
|
||||
|
||||
cat ./.reginfo \
|
||||
| _sudo "to load data for the first time in to the Nix Database" \
|
||||
"$NIX_INSTALLED_NIX/bin/nix-store" --load-db
|
||||
_sudo "to load data for the first time in to the Nix Database" \
|
||||
"$NIX_INSTALLED_NIX/bin/nix-store" --load-db < ./.reginfo
|
||||
|
||||
echo " Just finished getting the nix database ready."
|
||||
)
|
||||
|
@ -610,6 +744,7 @@ EOF
|
|||
}
|
||||
|
||||
configure_shell_profile() {
|
||||
task "Setting up shell profiles: ${PROFILE_TARGETS[*]}"
|
||||
for profile_target in "${PROFILE_TARGETS[@]}"; do
|
||||
if [ -e "$profile_target" ]; then
|
||||
_sudo "to back up your current $profile_target to $profile_target$PROFILE_BACKUP_SUFFIX" \
|
||||
|
@ -629,14 +764,27 @@ configure_shell_profile() {
|
|||
tee -a "$profile_target"
|
||||
fi
|
||||
done
|
||||
# TODO: should we suggest '. $PROFILE_NIX_FILE'? It would get them on
|
||||
# their way less disruptively, but a counter-argument is that they won't
|
||||
# immediately notice if something didn't get set up right?
|
||||
reminder "Nix won't work in active shell sessions until you restart them."
|
||||
}
|
||||
|
||||
cert_in_store() {
|
||||
# in a subshell
|
||||
# - change into the cert-file dir
|
||||
# - get the physical pwd
|
||||
# and test if this path is in the Nix store
|
||||
[[ "$(cd -- "$(dirname "$NIX_SSL_CERT_FILE")" && exec pwd -P)" == "$NIX_ROOT/store/"* ]]
|
||||
}
|
||||
|
||||
setup_default_profile() {
|
||||
_sudo "to installing a bootstrapping Nix in to the default Profile" \
|
||||
task "Setting up the default profile"
|
||||
_sudo "to install a bootstrapping Nix in to the default profile" \
|
||||
HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_NIX"
|
||||
|
||||
if [ -z "${NIX_SSL_CERT_FILE:-}" ] || ! [ -f "${NIX_SSL_CERT_FILE:-}" ]; then
|
||||
_sudo "to installing a bootstrapping SSL certificate just for Nix in to the default Profile" \
|
||||
if [ -z "${NIX_SSL_CERT_FILE:-}" ] || ! [ -f "${NIX_SSL_CERT_FILE:-}" ] || cert_in_store; then
|
||||
_sudo "to install a bootstrapping SSL certificate just for Nix in to the default profile" \
|
||||
HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_CACERT"
|
||||
export NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt
|
||||
fi
|
||||
|
@ -645,9 +793,13 @@ setup_default_profile() {
|
|||
# Have to explicitly pass NIX_SSL_CERT_FILE as part of the sudo call,
|
||||
# otherwise it will be lost in environments where sudo doesn't pass
|
||||
# all the environment variables by default.
|
||||
_sudo "to update the default channel in the default profile" \
|
||||
HOME="$ROOT_HOME" NIX_SSL_CERT_FILE="$NIX_SSL_CERT_FILE" "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs \
|
||||
|| channel_update_failed=1
|
||||
if ! _sudo "to update the default channel in the default profile" \
|
||||
HOME="$ROOT_HOME" NIX_SSL_CERT_FILE="$NIX_SSL_CERT_FILE" "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs; then
|
||||
reminder <<EOF
|
||||
I had trouble fetching the nixpkgs channel (are you offline?)
|
||||
To try again later, run: sudo -i nix-channel --update nixpkgs
|
||||
EOF
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
|
@ -662,6 +814,17 @@ EOF
|
|||
}
|
||||
|
||||
main() {
|
||||
# TODO: I've moved this out of validate_starting_assumptions so we
|
||||
# can fail faster in this case. Sourcing install-darwin... now runs
|
||||
# `touch /` to detect Read-only root, but it could update times on
|
||||
# pre-Catalina macOS if run as root user.
|
||||
if [ "$EUID" -eq 0 ]; then
|
||||
failure <<EOF
|
||||
Please do not run this script with root privileges. I will call sudo
|
||||
when I need to.
|
||||
EOF
|
||||
fi
|
||||
|
||||
if [ "$(uname -s)" = "Darwin" ]; then
|
||||
# shellcheck source=./install-darwin-multi-user.sh
|
||||
. "$EXTRACTED_NIX_PATH/install-darwin-multi-user.sh"
|
||||
|
@ -675,17 +838,24 @@ main() {
|
|||
welcome_to_nix
|
||||
chat_about_sudo
|
||||
|
||||
cure_artifacts
|
||||
# TODO: there's a tension between cure and validate. I moved the
|
||||
# the sudo/root check out of validate to the head of this func.
|
||||
# Cure is *intended* to subsume the validate-and-abort approach,
|
||||
# so it may eventually obsolete it.
|
||||
validate_starting_assumptions
|
||||
|
||||
setup_report
|
||||
|
||||
if ! ui_confirm "Ready to continue?"; then
|
||||
ok "Alright, no changes have been made :)"
|
||||
contactme
|
||||
get_help
|
||||
trap finish_cleanup EXIT
|
||||
exit 1
|
||||
fi
|
||||
|
||||
poly_prepare_to_install
|
||||
|
||||
create_build_group
|
||||
create_build_users
|
||||
create_directories
|
||||
|
@ -695,6 +865,7 @@ main() {
|
|||
configure_shell_profile
|
||||
|
||||
set +eu
|
||||
# shellcheck disable=SC1091
|
||||
. /etc/profile
|
||||
set -eu
|
||||
|
||||
|
@ -706,5 +877,20 @@ main() {
|
|||
trap finish_success EXIT
|
||||
}
|
||||
|
||||
# set an empty initial arg for bare invocations in case we need to
|
||||
# disambiguate someone directly invoking this later.
|
||||
if [ "${#@}" = 0 ]; then
|
||||
set ""
|
||||
fi
|
||||
|
||||
main
|
||||
# ACTION for override
|
||||
case "${1-}" in
|
||||
# uninstall)
|
||||
# shift
|
||||
# uninstall "$@";;
|
||||
# install == same as the no-arg condition for now (but, explicit)
|
||||
""|install)
|
||||
main;;
|
||||
*) # holding space for future options (like uninstall + install?)
|
||||
failure "install-multi-user: invalid argument";;
|
||||
esac
|
||||
|
|
|
@ -26,18 +26,9 @@ fi
|
|||
|
||||
# macOS support for 10.12.6 or higher
|
||||
if [ "$(uname -s)" = "Darwin" ]; then
|
||||
IFS='.' read macos_major macos_minor macos_patch << EOF
|
||||
IFS='.' read -r macos_major macos_minor macos_patch << EOF
|
||||
$(sw_vers -productVersion)
|
||||
EOF
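# For example (values illustrative): "10.15.7" splits into macos_major=10,
# macos_minor=15, macos_patch=7, while "11.1" leaves macos_patch empty --
# which is why the comment below notes that patch may not be present.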
|
||||
# TODO: this is a temporary speed-bump to keep people from naively installing Nix
|
||||
# on macOS Big Sur (11.0+, 10.16+) until nixpkgs updates are ready for them.
|
||||
# *Ideally* this is gone before next Nix release. If you're intentionally working on
|
||||
# Nix + Big Sur, just comment out this block and be on your way :)
|
||||
if [ "$macos_major" -gt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -gt 15 ]; }; then
|
||||
echo "$0: nixpkgs isn't quite ready to support macOS $(sw_vers -productVersion) yet"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$macos_major" -lt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -lt 12 ]; } || { [ "$macos_minor" -eq 12 ] && [ "$macos_patch" -lt 6 ]; }; then
|
||||
# patch may not be present; command substitution for simplicity
|
||||
echo "$0: macOS $(sw_vers -productVersion) is not supported, upgrade to 10.12.6 or higher"
|
||||
|
@ -46,21 +37,40 @@ EOF
|
|||
fi
|
||||
|
||||
# Determine if we could use the multi-user installer or not
|
||||
if [ "$(uname -s)" = "Darwin" ]; then
|
||||
echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2
|
||||
elif [ "$(uname -s)" = "Linux" ]; then
|
||||
if [ "$(uname -s)" = "Linux" ]; then
|
||||
echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2
|
||||
fi
|
||||
|
||||
INSTALL_MODE=no-daemon
|
||||
CREATE_DARWIN_VOLUME=0
|
||||
case "$(uname -s)" in
|
||||
"Darwin")
|
||||
INSTALL_MODE=daemon;;
|
||||
*)
|
||||
INSTALL_MODE=no-daemon;;
|
||||
esac
|
||||
|
||||
# space-separated string
|
||||
ACTIONS=
|
||||
|
||||
# handle the command line flags
|
||||
while [ $# -gt 0 ]; do
|
||||
case $1 in
|
||||
--daemon)
|
||||
INSTALL_MODE=daemon;;
|
||||
INSTALL_MODE=daemon
|
||||
ACTIONS="${ACTIONS}install "
|
||||
;;
|
||||
--no-daemon)
|
||||
INSTALL_MODE=no-daemon;;
|
||||
if [ "$(uname -s)" = "Darwin" ]; then
|
||||
printf '\e[1;31mError: --no-daemon installs are no-longer supported on Darwin/macOS!\e[0m\n' >&2
|
||||
exit 1
|
||||
fi
|
||||
INSTALL_MODE=no-daemon
|
||||
# intentional tail space
|
||||
ACTIONS="${ACTIONS}install "
|
||||
;;
|
||||
# --uninstall)
|
||||
# # intentional tail space
|
||||
# ACTIONS="${ACTIONS}uninstall "
|
||||
# ;;
|
||||
--no-channel-add)
|
||||
export NIX_INSTALLER_NO_CHANNEL_ADD=1;;
|
||||
--daemon-user-count)
|
||||
|
@ -69,13 +79,18 @@ while [ $# -gt 0 ]; do
|
|||
--no-modify-profile)
|
||||
NIX_INSTALLER_NO_MODIFY_PROFILE=1;;
|
||||
--darwin-use-unencrypted-nix-store-volume)
|
||||
CREATE_DARWIN_VOLUME=1;;
|
||||
{
|
||||
echo "Warning: the flag --darwin-use-unencrypted-nix-store-volume"
|
||||
echo " is no longer needed and will be removed in the future."
|
||||
echo ""
|
||||
} >&2;;
|
||||
--nix-extra-conf-file)
|
||||
export NIX_EXTRA_CONF="$(cat $2)"
|
||||
# shellcheck disable=SC2155
|
||||
export NIX_EXTRA_CONF="$(cat "$2")"
|
||||
shift;;
|
||||
*)
|
||||
(
|
||||
echo "Nix Installer [--daemon|--no-daemon] [--daemon-user-count INT] [--no-channel-add] [--no-modify-profile] [--darwin-use-unencrypted-nix-store-volume] [--nix-extra-conf-file FILE]"
|
||||
{
|
||||
echo "Nix Installer [--daemon|--no-daemon] [--daemon-user-count INT] [--no-channel-add] [--no-modify-profile] [--nix-extra-conf-file FILE]"
|
||||
|
||||
echo "Choose installation method."
|
||||
echo ""
|
||||
|
@ -91,55 +106,25 @@ while [ $# -gt 0 ]; do
|
|||
echo ""
|
||||
echo " --no-channel-add: Don't add any channels. nixpkgs-unstable is installed by default."
|
||||
echo ""
|
||||
echo " --no-modify-profile: Skip channel installation. When not provided nixpkgs-unstable"
|
||||
echo " is installed by default."
|
||||
echo " --no-modify-profile: Don't modify the user profile to automatically load nix."
|
||||
echo ""
|
||||
echo " --daemon-user-count: Number of build users to create. Defaults to 32."
|
||||
echo ""
|
||||
echo " --nix-extra-conf-file: Path to nix.conf to prepend when installing /etc/nix.conf"
|
||||
echo " --nix-extra-conf-file: Path to nix.conf to prepend when installing /etc/nix/nix.conf"
|
||||
echo ""
|
||||
if [ -n "${INVOKED_FROM_INSTALL_IN:-}" ]; then
|
||||
echo " --tarball-url-prefix URL: Base URL to download the Nix tarball from."
|
||||
fi
|
||||
) >&2
|
||||
} >&2
|
||||
|
||||
# darwin and Catalina+
|
||||
if [ "$(uname -s)" = "Darwin" ] && { [ "$macos_major" -gt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -gt 14 ]; }; }; then
|
||||
(
|
||||
echo " --darwin-use-unencrypted-nix-store-volume: Create an APFS volume for the Nix"
|
||||
echo " store and mount it at /nix. This is the recommended way to create"
|
||||
echo " /nix with a read-only / on macOS >=10.15."
|
||||
echo " See: https://nixos.org/nix/manual/#sect-macos-installation"
|
||||
echo ""
|
||||
) >&2
|
||||
fi
|
||||
exit;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
if [ "$(uname -s)" = "Darwin" ]; then
|
||||
if [ "$CREATE_DARWIN_VOLUME" = 1 ]; then
|
||||
printf '\e[1;31mCreating volume and mountpoint /nix.\e[0m\n'
|
||||
"$self/create-darwin-volume.sh"
|
||||
fi
|
||||
|
||||
writable="$(diskutil info -plist / | xmllint --xpath "name(/plist/dict/key[text()='Writable']/following-sibling::*[1])" -)"
|
||||
if ! [ -e $dest ] && [ "$writable" = "false" ]; then
|
||||
(
|
||||
echo ""
|
||||
echo "Installing on macOS >=10.15 requires relocating the store to an apfs volume."
|
||||
echo "Use sh <(curl -L https://nixos.org/nix/install) --darwin-use-unencrypted-nix-store-volume or run the preparation steps manually."
|
||||
echo "See https://nixos.org/nix/manual/#sect-macos-installation"
|
||||
echo ""
|
||||
) >&2
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$INSTALL_MODE" = "daemon" ]; then
|
||||
printf '\e[1;31mSwitching to the Multi-user Installer\e[0m\n'
|
||||
exec "$self/install-multi-user"
|
||||
exec "$self/install-multi-user" $ACTIONS # let ACTIONS split
|
||||
exit 0
|
||||
fi
|
||||
|
||||
|
@ -149,7 +134,7 @@ fi
|
|||
|
||||
echo "performing a single-user installation of Nix..." >&2
|
||||
|
||||
if ! [ -e $dest ]; then
|
||||
if ! [ -e "$dest" ]; then
|
||||
cmd="mkdir -m 0755 $dest && chown $USER $dest"
|
||||
echo "directory $dest does not exist; creating it by running '$cmd' using sudo" >&2
|
||||
if ! sudo sh -c "$cmd"; then
|
||||
|
@ -158,12 +143,12 @@ if ! [ -e $dest ]; then
|
|||
fi
|
||||
fi
|
||||
|
||||
if ! [ -w $dest ]; then
|
||||
if ! [ -w "$dest" ]; then
|
||||
echo "$0: directory $dest exists, but is not writable by you. This could indicate that another user has already performed a single-user installation of Nix on this system. If you wish to enable multi-user support see https://nixos.org/nix/manual/#ssec-multi-user. If you wish to continue with a single-user install for $USER please run 'chown -R $USER $dest' as root." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mkdir -p $dest/store
|
||||
mkdir -p "$dest/store"
|
||||
|
||||
printf "copying Nix to %s..." "${dest}/store" >&2
|
||||
# Insert a newline if no progress is shown.
|
||||
|
@ -194,6 +179,7 @@ if ! "$nix/bin/nix-store" --load-db < "$self/.reginfo"; then
|
|||
exit 1
|
||||
fi
|
||||
|
||||
# shellcheck source=./nix-profile.sh.in
|
||||
. "$nix/etc/profile.d/nix.sh"
|
||||
|
||||
if ! "$nix/bin/nix-env" -i "$nix"; then
|
||||
|
@ -203,17 +189,17 @@ fi
|
|||
|
||||
# Install an SSL certificate bundle.
|
||||
if [ -z "$NIX_SSL_CERT_FILE" ] || ! [ -f "$NIX_SSL_CERT_FILE" ]; then
|
||||
$nix/bin/nix-env -i "$cacert"
|
||||
"$nix/bin/nix-env" -i "$cacert"
|
||||
export NIX_SSL_CERT_FILE="$HOME/.nix-profile/etc/ssl/certs/ca-bundle.crt"
|
||||
fi
|
||||
|
||||
# Subscribe the user to the Nixpkgs channel and fetch it.
|
||||
if [ -z "$NIX_INSTALLER_NO_CHANNEL_ADD" ]; then
|
||||
if ! $nix/bin/nix-channel --list | grep -q "^nixpkgs "; then
|
||||
$nix/bin/nix-channel --add https://nixos.org/channels/nixpkgs-unstable
|
||||
if ! "$nix/bin/nix-channel" --list | grep -q "^nixpkgs "; then
|
||||
"$nix/bin/nix-channel" --add https://nixos.org/channels/nixpkgs-unstable
|
||||
fi
|
||||
if [ -z "$_NIX_INSTALLER_TEST" ]; then
|
||||
if ! $nix/bin/nix-channel --update nixpkgs; then
|
||||
if ! "$nix/bin/nix-channel" --update nixpkgs; then
|
||||
echo "Fetching the nixpkgs channel failed. (Are you offline?)"
|
||||
echo "To try again later, run \"nix-channel --update nixpkgs\"."
|
||||
fi
|
||||
|
@ -229,7 +215,7 @@ if [ -z "$NIX_INSTALLER_NO_MODIFY_PROFILE" ]; then
|
|||
if [ -w "$fn" ]; then
|
||||
if ! grep -q "$p" "$fn"; then
|
||||
echo "modifying $fn..." >&2
|
||||
echo -e "\nif [ -e $p ]; then . $p; fi # added by Nix installer" >> "$fn"
|
||||
printf '\nif [ -e %s ]; then . %s; fi # added by Nix installer\n' "$p" "$p" >> "$fn"
|
||||
fi
|
||||
added=1
|
||||
break
|
||||
|
@ -240,7 +226,7 @@ if [ -z "$NIX_INSTALLER_NO_MODIFY_PROFILE" ]; then
|
|||
if [ -w "$fn" ]; then
|
||||
if ! grep -q "$p" "$fn"; then
|
||||
echo "modifying $fn..." >&2
|
||||
echo -e "\nif [ -e $p ]; then . $p; fi # added by Nix installer" >> "$fn"
|
||||
printf '\nif [ -e %s ]; then . %s; fi # added by Nix installer\n' "$p" "$p" >> "$fn"
|
||||
fi
|
||||
added=1
|
||||
break
|
||||
|
|
|
@ -15,7 +15,7 @@ readonly SERVICE_OVERRIDE=${SERVICE_DEST}.d/override.conf
|
|||
|
||||
create_systemd_override() {
|
||||
header "Configuring proxy for the nix-daemon service"
|
||||
_sudo "create directory for systemd unit override" mkdir -p "$(dirname $SERVICE_OVERRIDE)"
|
||||
_sudo "create directory for systemd unit override" mkdir -p "$(dirname "$SERVICE_OVERRIDE")"
|
||||
cat <<EOF | _sudo "create systemd unit override" tee "$SERVICE_OVERRIDE"
|
||||
[Service]
|
||||
$1
|
||||
|
@ -41,10 +41,8 @@ handle_network_proxy() {
|
|||
fi
|
||||
}
|
||||
|
||||
poly_validate_assumptions() {
|
||||
if [ "$(uname -s)" != "Linux" ]; then
|
||||
failure "This script is for use with Linux!"
|
||||
fi
|
||||
poly_cure_artifacts() {
|
||||
:
|
||||
}
|
||||
|
||||
poly_service_installed_check() {
|
||||
|
@ -72,7 +70,7 @@ poly_service_setup_note() {
|
|||
EOF
|
||||
}
|
||||
|
||||
poly_extra_try_me_commands(){
|
||||
poly_extra_try_me_commands() {
|
||||
if [ -e /run/systemd/system ]; then
|
||||
:
|
||||
else
|
||||
|
@ -81,19 +79,10 @@ poly_extra_try_me_commands(){
|
|||
EOF
|
||||
fi
|
||||
}
|
||||
poly_extra_setup_instructions(){
|
||||
if [ -e /run/systemd/system ]; then
|
||||
:
|
||||
else
|
||||
cat <<EOF
|
||||
Additionally, you may want to add nix-daemon to your init-system.
|
||||
|
||||
EOF
|
||||
fi
|
||||
}
|
||||
|
||||
poly_configure_nix_daemon_service() {
|
||||
if [ -e /run/systemd/system ]; then
|
||||
task "Setting up the nix-daemon systemd service"
|
||||
_sudo "to set up the nix-daemon service" \
|
||||
systemctl link "/nix/var/nix/profiles/default$SERVICE_SRC"
|
||||
|
||||
|
@ -110,6 +99,8 @@ poly_configure_nix_daemon_service() {
|
|||
|
||||
_sudo "to start the nix-daemon.service" \
|
||||
systemctl restart nix-daemon.service
|
||||
else
|
||||
reminder "I don't support your init system yet; you may want to add nix-daemon manually."
|
||||
fi
|
||||
}
|
||||
|
||||
|
@ -207,3 +198,7 @@ poly_create_build_user() {
|
|||
--password "!" \
|
||||
"$username"
|
||||
}
|
||||
|
||||
poly_prepare_to_install() {
|
||||
:
|
||||
}
|
||||
|
|
|
@ -40,21 +40,25 @@ case "$(uname -s).$(uname -m)" in
|
|||
path=@tarballPath_aarch64-linux@
|
||||
system=aarch64-linux
|
||||
;;
|
||||
Linux.armv6l_linux)
|
||||
hash=@tarballHash_armv6l-linux@
|
||||
path=@tarballPath_armv6l-linux@
|
||||
system=armv6l-linux
|
||||
;;
|
||||
Linux.armv7l_linux)
|
||||
hash=@tarballHash_armv7l-linux@
|
||||
path=@tarballPath_armv7l-linux@
|
||||
system=armv7l-linux
|
||||
;;
|
||||
Darwin.x86_64)
|
||||
hash=@tarballHash_x86_64-darwin@
|
||||
path=@tarballPath_x86_64-darwin@
|
||||
system=x86_64-darwin
|
||||
;;
|
||||
Darwin.arm64|Darwin.aarch64)
|
||||
# check for Rosetta 2 support
|
||||
if ! [ -f /Library/Apple/System/Library/LaunchDaemons/com.apple.oahd.plist ]; then
|
||||
oops "Rosetta 2 is not installed on this ARM64 macOS machine. Run softwareupdate --install-rosetta then restart installation"
|
||||
fi
|
||||
|
||||
hash=@binaryTarball_x86_64-darwin@
|
||||
path=@tarballPath_x86_64-darwin@
|
||||
# eventually maybe: aarch64-darwin
|
||||
system=x86_64-darwin
|
||||
hash=@tarballHash_aarch64-darwin@
|
||||
path=@tarballPath_aarch64-darwin@
|
||||
system=aarch64-darwin
|
||||
;;
|
||||
*) oops "sorry, there is no binary distribution of Nix for your platform";;
|
||||
esac
|
||||
|
@ -72,14 +76,21 @@ fi
|
|||
|
||||
tarball=$tmpDir/nix-@nixVersion@-$system.tar.xz
|
||||
|
||||
require_util curl "download the binary tarball"
|
||||
require_util tar "unpack the binary tarball"
|
||||
if [ "$(uname -s)" != "Darwin" ]; then
|
||||
require_util xz "unpack the binary tarball"
|
||||
fi
|
||||
|
||||
if command -v curl > /dev/null 2>&1; then
|
||||
fetch() { curl -L "$1" -o "$2"; }
|
||||
elif command -v wget > /dev/null 2>&1; then
|
||||
fetch() { wget "$1" -O "$2"; }
|
||||
else
|
||||
oops "you don't have wget or curl installed, which I need to download the binary tarball"
|
||||
fi
|
||||
|
||||
echo "downloading Nix @nixVersion@ binary tarball for $system from '$url' to '$tmpDir'..."
|
||||
curl -L "$url" -o "$tarball" || oops "failed to download '$url'"
|
||||
fetch "$url" "$tarball" || oops "failed to download '$url'"
|
||||
|
||||
if command -v sha256sum > /dev/null 2>&1; then
|
||||
hash2="$(sha256sum -b "$tarball" | cut -c1-64)"
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
nix_noinst_scripts := \
|
||||
$(d)/nix-http-export.cgi \
|
||||
$(d)/nix-profile.sh \
|
||||
$(d)/nix-reduce-build
|
||||
$(d)/nix-profile.sh
|
||||
|
||||
noinst-scripts += $(nix_noinst_scripts)
|
||||
|
||||
|
|
|
@@ -1,51 +0,0 @@
#! /bin/sh

export HOME=/tmp
export NIX_REMOTE=daemon

TMP_DIR="${TMP_DIR:-/tmp/nix-export}"

@coreutils@/mkdir -p "$TMP_DIR" || true
@coreutils@/chmod a+r "$TMP_DIR"

needed_path="?$QUERY_STRING"
needed_path="${needed_path#*[?&]needed_path=}"
needed_path="${needed_path%%&*}"
#needed_path="$(echo $needed_path | ./unhttp)"
needed_path="${needed_path//%2B/+}"
needed_path="${needed_path//%3D/=}"

echo needed_path: "$needed_path" >&2

NIX_STORE="${NIX_STORE_DIR:-/nix/store}"

echo NIX_STORE: "${NIX_STORE}" >&2

full_path="${NIX_STORE}"/"$needed_path"

if [ "$needed_path" != "${needed_path%.drv}" ]; then
echo "Status: 403 You should create the derivation file yourself"
echo "Content-Type: text/plain"
echo
echo "Refusing to disclose derivation contents"
exit
fi

if @bindir@/nix-store --check-validity "$full_path"; then
if ! [ -e nix-export/"$needed_path".nar.gz ]; then
@bindir@/nix-store --export "$full_path" | @gzip@ > "$TMP_DIR"/"$needed_path".nar.gz
@coreutils@/ln -fs "$TMP_DIR"/"$needed_path".nar.gz nix-export/"$needed_path".nar.gz
fi;
echo "Status: 301 Moved"
echo "Location: nix-export/"$needed_path".nar.gz"
echo
else
echo "Status: 404 No such path found"
echo "Content-Type: text/plain"
echo
echo "Path not found:"
echo "$needed_path"
echo "checked:"
echo "$full_path"
fi
@@ -5,7 +5,7 @@ __ETC_PROFILE_NIX_SOURCED=1
export NIX_PROFILES="@localstatedir@/nix/profiles/default $HOME/.nix-profile"

# Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work.
if [ ! -z "${NIX_SSL_CERT_FILE:-}" ]; then
if [ -n "${NIX_SSL_CERT_FILE:-}" ]; then
: # Allow users to override the NIX_SSL_CERT_FILE
elif [ -e /etc/ssl/certs/ca-certificates.crt ]; then # NixOS, Ubuntu, Debian, Gentoo, Arch
export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
@@ -18,14 +18,14 @@ elif [ -e /etc/pki/tls/certs/ca-bundle.crt ]; then # Fedora, CentOS
else
# Fall back to what is in the nix profiles, favouring whatever is defined last.
check_nix_profiles() {
if [ "$ZSH_VERSION" ]; then
if [ -n "$ZSH_VERSION" ]; then
# Zsh by default doesn't split words in unquoted parameter expansion.
# Set local_options for these options to be reverted at the end of the function
# and shwordsplit to force splitting words in $NIX_PROFILES below.
setopt local_options shwordsplit
fi
for i in $NIX_PROFILES; do
if [ -e $i/etc/ssl/certs/ca-bundle.crt ]; then
if [ -e "$i/etc/ssl/certs/ca-bundle.crt" ]; then
export NIX_SSL_CERT_FILE=$i/etc/ssl/certs/ca-bundle.crt
fi
done
|
@@ -1,171 +0,0 @@
#! @bash@

WORKING_DIRECTORY=$(mktemp -d "${TMPDIR:-/tmp}"/nix-reduce-build-XXXXXX);
cd "$WORKING_DIRECTORY";

if test -z "$1" || test "a--help" = "a$1" ; then
echo 'nix-reduce-build (paths or Nix expressions) -- (package sources)' >&2
echo As in: >&2
echo nix-reduce-build /etc/nixos/nixos -- ssh://user@somewhere.nowhere.example.org >&2
echo nix-reduce-build /etc/nixos/nixos -- \\
echo " " \''http://somewhere.nowhere.example.org/nix/nix-http-export.cgi?needed_path='\' >&2
echo " store path name will be added into the end of the URL" >&2
echo nix-reduce-build /etc/nixos/nixos -- file://home/user/nar/ >&2
echo " that should be a directory where gzipped 'nix-store --export' ">&2
echo " files are located (they should have .nar.gz extension)" >&2
echo " Or all together: " >&2
echo -e nix-reduce-build /expr.nix /e2.nix -- \\\\\\\n\
" ssh://a@b.example.com http://n.example.com/get-nar?q= file://nar/" >&2
echo " Also supports best-effort local builds of failing expression set:" >&2
echo "nix-reduce-build /e.nix -- nix-daemon:// nix-self://" >&2
echo " nix-daemon:// builds using daemon"
echo " nix-self:// builds directly using nix-store from current installation" >&2
echo " nix-daemon-fixed:// and nix-self-fixed:// do the same, but only for" >&2;
echo "derivations with specified output hash (sha256, sha1 or md5)." >&2
echo " nix-daemon-substitute:// and nix-self-substitute:// try to substitute" >&2;
echo "maximum amount of paths" >&2;
echo " nix-daemon-build:// and nix-self-build:// try to build (not substitute)" >&2;
echo "maximum amount of paths" >&2;
echo " If no package sources are specified, required paths are listed." >&2;
exit;
fi;

while ! test "$1" = "--" || test "$1" = "" ; do
echo "$1" >> initial; >&2
shift;
done
shift;
echo Will work on $(cat initial | wc -l) targets. >&2

while read ; do
case "$REPLY" in
${NIX_STORE_DIR:-/nix/store}/*)
echo "$REPLY" >> paths; >&2
;;
*)
(
IFS=: ;
nix-instantiate $REPLY >> paths;
);
;;
esac;
done < initial;
echo Proceeding $(cat paths | wc -l) paths. >&2

while read; do
case "$REPLY" in
*.drv)
echo "$REPLY" >> derivers; >&2
;;
*)
nix-store --query --deriver "$REPLY" >>derivers;
;;
esac;
done < paths;
echo Found $(cat derivers | wc -l) derivers. >&2

cat derivers | xargs nix-store --query -R > derivers-closure;
echo Proceeding at most $(cat derivers-closure | wc -l) derivers. >&2

cat derivers-closure | egrep '[.]drv$' | xargs nix-store --query --outputs > wanted-paths;
cat derivers-closure | egrep -v '[.]drv$' >> wanted-paths;
echo Prepared $(cat wanted-paths | wc -l) paths to get. >&2

cat wanted-paths | xargs nix-store --check-validity --print-invalid > needed-paths;
echo We need $(cat needed-paths | wc -l) paths. >&2

egrep '[.]drv$' derivers-closure > critical-derivers;

if test -z "$1" ; then
cat needed-paths;
fi;

refresh_critical_derivers() {
echo "Finding needed derivers..." >&2;
cat critical-derivers | while read; do
if ! (nix-store --query --outputs "$REPLY" | xargs nix-store --check-validity &> /dev/null;); then
echo "$REPLY";
fi;
done > new-critical-derivers;
mv new-critical-derivers critical-derivers;
echo The needed paths are realized by $(cat critical-derivers | wc -l) derivers. >&2
}

build_here() {
cat critical-derivers | while read; do
echo "Realising $REPLY using nix-daemon" >&2
@bindir@/nix-store -r "${REPLY}"
done;
}

try_to_substitute(){
cat needed-paths | while read ; do
echo "Building $REPLY using nix-daemon" >&2
@bindir@/nix-store -r "${NIX_STORE_DIR:-/nix/store}/${REPLY##*/}"
done;
}

for i in "$@"; do
sshHost="${i#ssh://}";
httpHost="${i#http://}";
httpsHost="${i#https://}";
filePath="${i#file:/}";
if [ "$i" != "$sshHost" ]; then
cat needed-paths | while read; do
echo "Getting $REPLY and its closure over ssh" >&2
nix-copy-closure --from "$sshHost" --gzip "$REPLY" </dev/null || true;
done;
elif [ "$i" != "$httpHost" ] || [ "$i" != "$httpsHost" ]; then
cat needed-paths | while read; do
echo "Getting $REPLY over http/https" >&2
curl ${BAD_CERTIFICATE:+-k} -L "$i${REPLY##*/}" | gunzip | nix-store --import;
done;
elif [ "$i" != "$filePath" ] ; then
cat needed-paths | while read; do
echo "Installing $REPLY from file" >&2
gunzip < "$filePath/${REPLY##*/}".nar.gz | nix-store --import;
done;
elif [ "$i" = "nix-daemon://" ] ; then
NIX_REMOTE=daemon try_to_substitute;
refresh_critical_derivers;
NIX_REMOTE=daemon build_here;
elif [ "$i" = "nix-self://" ] ; then
NIX_REMOTE= try_to_substitute;
refresh_critical_derivers;
NIX_REMOTE= build_here;
elif [ "$i" = "nix-daemon-fixed://" ] ; then
refresh_critical_derivers;

cat critical-derivers | while read; do
if egrep '"(md5|sha1|sha256)"' "$REPLY" &>/dev/null; then
echo "Realising $REPLY using nix-daemon" >&2
NIX_REMOTE=daemon @bindir@/nix-store -r "${REPLY}"
fi;
done;
elif [ "$i" = "nix-self-fixed://" ] ; then
refresh_critical_derivers;

cat critical-derivers | while read; do
if egrep '"(md5|sha1|sha256)"' "$REPLY" &>/dev/null; then
echo "Realising $REPLY using direct Nix build" >&2
NIX_REMOTE= @bindir@/nix-store -r "${REPLY}"
fi;
done;
elif [ "$i" = "nix-daemon-substitute://" ] ; then
NIX_REMOTE=daemon try_to_substitute;
elif [ "$i" = "nix-self-substitute://" ] ; then
NIX_REMOTE= try_to_substitute;
elif [ "$i" = "nix-daemon-build://" ] ; then
refresh_critical_derivers;
NIX_REMOTE=daemon build_here;
elif [ "$i" = "nix-self-build://" ] ; then
refresh_critical_derivers;
NIX_REMOTE= build_here;
fi;
mv needed-paths wanted-paths;
cat wanted-paths | xargs nix-store --check-validity --print-invalid > needed-paths;
echo We still need $(cat needed-paths | wc -l) paths. >&2
done;

cd /
rm -r "$WORKING_DIRECTORY"
@@ -3,7 +3,7 @@
set -e

script=$(nix-build -A outputs.hydraJobs.installerScriptForGHA --no-out-link)
installerHash=$(echo $script | cut -b12-43 -)
installerHash=$(echo "$script" | cut -b12-43 -)

installerURL=https://$CACHIX_NAME.cachix.org/serve/$installerHash/install
@@ -18,6 +18,7 @@
#include "derivations.hh"
#include "local-store.hh"
#include "legacy.hh"
#include "experimental-features.hh"

using namespace nix;
using std::cin;
@@ -130,11 +131,14 @@ static int main_build_remote(int argc, char * * argv)
for (auto & m : machines) {
debug("considering building on remote machine '%s'", m.storeUri);

if (m.enabled && std::find(m.systemTypes.begin(),
m.systemTypes.end(),
neededSystem) != m.systemTypes.end() &&
if (m.enabled
&& (neededSystem == "builtin"
|| std::find(m.systemTypes.begin(),
m.systemTypes.end(),
neededSystem) != m.systemTypes.end()) &&
m.allSupported(requiredFeatures) &&
m.mandatoryMet(requiredFeatures)) {
m.mandatoryMet(requiredFeatures))
{
rightType = true;
AutoCloseFD free;
uint64_t load = 0;
@@ -270,14 +274,23 @@ connected:

{
Activity act(*logger, lvlTalkative, actUnknown, fmt("copying dependencies to '%s'", storeUri));
copyPaths(store, ref<Store>(sshStore), store->parseStorePathSet(inputs), NoRepair, NoCheckSigs, substitute);
copyPaths(*store, *sshStore, store->parseStorePathSet(inputs), NoRepair, NoCheckSigs, substitute);
}

uploadLock = -1;

auto drv = store->readDerivation(*drvPath);
auto outputHashes = staticOutputHashes(*store, drv);
drv.inputSrcs = store->parseStorePathSet(inputs);

// Hijack the inputs paths of the derivation to include all the paths
// that come from the `inputDrvs` set.
// We don’t do that for the derivations whose `inputDrvs` is empty
// because
// 1. It’s not needed
// 2. Changing the `inputSrcs` set changes the associated output ids,
// which break CA derivations
if (!drv.inputDrvs.empty())
drv.inputSrcs = store->parseStorePathSet(inputs);

auto result = sshStore->buildDerivation(*drvPath, drv);
@@ -286,7 +299,7 @@ connected:

std::set<Realisation> missingRealisations;
StorePathSet missingPaths;
if (settings.isExperimentalFeatureEnabled("ca-derivations") && !derivationHasKnownOutputPaths(drv.type())) {
if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations) && !derivationHasKnownOutputPaths(drv.type())) {
for (auto & outputName : wantedOutputs) {
auto thisOutputHash = outputHashes.at(outputName);
auto thisOutputId = DrvOutput{ thisOutputHash, outputName };
@@ -312,13 +325,13 @@ connected:
if (auto localStore = store.dynamic_pointer_cast<LocalStore>())
for (auto & path : missingPaths)
localStore->locksHeld.insert(store->printStorePath(path)); /* FIXME: ugly */
copyPaths(ref<Store>(sshStore), store, missingPaths, NoRepair, NoCheckSigs, NoSubstitute);
copyPaths(*sshStore, *store, missingPaths, NoRepair, NoCheckSigs, NoSubstitute);
}
// XXX: Should be done as part of `copyPaths`
for (auto & realisation : missingRealisations) {
// Should hold, because if the feature isn't enabled the set
// of missing realisations should be empty
settings.requireExperimentalFeature("ca-derivations");
settings.requireExperimentalFeature(Xp::CaDerivations);
store->registerDrvOutput(realisation);
}
@@ -95,8 +95,21 @@ EvalCommand::~EvalCommand()
evalState->printStats();
}

ref<Store> EvalCommand::getEvalStore()
{
if (!evalStore)
evalStore = evalStoreUrl ? openStore(*evalStoreUrl) : getStore();
return ref<Store>(evalStore);
}

RealisedPathsCommand::RealisedPathsCommand(bool recursive)
ref<EvalState> EvalCommand::getEvalState()
{
if (!evalState)
evalState = std::make_shared<EvalState>(searchPath, getEvalStore(), getStore());
return ref<EvalState>(evalState);
}

BuiltPathsCommand::BuiltPathsCommand(bool recursive)
: recursive(recursive)
{
if (recursive)
@@ -123,44 +136,53 @@ RealisedPathsCommand::RealisedPathsCommand(bool recursive)
});
}

void RealisedPathsCommand::run(ref<Store> store)
void BuiltPathsCommand::run(ref<Store> store)
{
std::vector<RealisedPath> paths;
BuiltPaths paths;
if (all) {
if (installables.size())
throw UsageError("'--all' does not expect arguments");
// XXX: Only uses opaque paths, ignores all the realisations
for (auto & p : store->queryAllValidPaths())
paths.push_back(p);
paths.push_back(BuiltPath::Opaque{p});
} else {
auto pathSet = toRealisedPaths(store, realiseMode, operateOn, installables);
paths = toBuiltPaths(getEvalStore(), store, realiseMode, operateOn, installables);
if (recursive) {
auto roots = std::move(pathSet);
pathSet = {};
RealisedPath::closure(*store, roots, pathSet);
// XXX: This only computes the store path closure, ignoring
// intermediate realisations
StorePathSet pathsRoots, pathsClosure;
for (auto & root : paths) {
auto rootFromThis = root.outPaths();
pathsRoots.insert(rootFromThis.begin(), rootFromThis.end());
}
store->computeFSClosure(pathsRoots, pathsClosure);
for (auto & path : pathsClosure)
paths.push_back(BuiltPath::Opaque{path});
}
for (auto & path : pathSet)
paths.push_back(path);
}

run(store, std::move(paths));
}

StorePathsCommand::StorePathsCommand(bool recursive)
: RealisedPathsCommand(recursive)
: BuiltPathsCommand(recursive)
{
}

void StorePathsCommand::run(ref<Store> store, std::vector<RealisedPath> paths)
void StorePathsCommand::run(ref<Store> store, BuiltPaths && paths)
{
StorePaths storePaths;
for (auto & p : paths)
storePaths.push_back(p.path());
StorePathSet storePaths;
for (auto & builtPath : paths)
for (auto & p : builtPath.outPaths())
storePaths.insert(p);

run(store, std::move(storePaths));
auto sorted = store->topoSortPaths(storePaths);
std::reverse(sorted.begin(), sorted.end());

run(store, std::move(sorted));
}

void StorePathCommand::run(ref<Store> store, std::vector<StorePath> storePaths)
void StorePathCommand::run(ref<Store> store, std::vector<StorePath> && storePaths)
{
if (storePaths.size() != 1)
throw UsageError("this command requires exactly one store path");

@@ -204,7 +226,7 @@ void MixProfile::updateProfile(const StorePath & storePath)
profile2, storePath));
}

void MixProfile::updateProfile(const DerivedPathsWithHints & buildables)
void MixProfile::updateProfile(const BuiltPaths & buildables)
{
if (!profile) return;

@@ -212,22 +234,19 @@ void MixProfile::updateProfile(const DerivedPathsWithHints & buildables)

for (auto & buildable : buildables) {
std::visit(overloaded {
[&](DerivedPathWithHints::Opaque bo) {
[&](const BuiltPath::Opaque & bo) {
result.push_back(bo.path);
},
[&](DerivedPathWithHints::Built bfd) {
[&](const BuiltPath::Built & bfd) {
for (auto & output : bfd.outputs) {
/* Output path should be known because we just tried to
build it. */
assert(output.second);
result.push_back(*output.second);
result.push_back(output.second);
}
},
}, buildable.raw());
}

if (result.size() != 1)
throw Error("'--profile' requires that the arguments produce a single store path, but there are %d", result.size());
throw UsageError("'--profile' requires that the arguments produce a single store path, but there are %d", result.size());

updateProfile(result[0]);
}
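
The `std::visit(overloaded { ... })` calls in the hunk above rely on the standard C++17 overload-set helper. As a reading aid only, here is a minimal self-contained sketch of that idiom; the `Opaque`/`Built` stand-ins below are hypothetical simplifications, not the real `BuiltPath` variants.

```cpp
#include <iostream>
#include <string>
#include <variant>
#include <vector>

// The usual C++17 "overloaded" visitor helper, as used with std::visit above.
template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

// Hypothetical stand-ins for the opaque-path / built-derivation alternatives.
struct Opaque { std::string path; };
struct Built { std::string drvPath; std::vector<std::string> outputs; };
using Example = std::variant<Opaque, Built>;

int main() {
    std::vector<Example> xs = {
        Opaque{"/nix/store/aaa-foo"},
        Built{"/nix/store/bbb-bar.drv", {"out"}},
    };
    for (auto & x : xs)
        std::visit(overloaded {
            [](const Opaque & o) { std::cout << "opaque: " << o.path << "\n"; },
            [](const Built & b)  { std::cout << "built: "  << b.drvPath << "\n"; },
        }, x);
}
```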
|
||||
|
|
|
@ -47,13 +47,18 @@ struct EvalCommand : virtual StoreCommand, MixEvalArgs
|
|||
{
|
||||
bool startReplOnEvalErrors = false;
|
||||
|
||||
ref<EvalState> getEvalState();
|
||||
|
||||
EvalCommand();
|
||||
|
||||
std::shared_ptr<EvalState> evalState;
|
||||
|
||||
~EvalCommand();
|
||||
|
||||
ref<Store> getEvalStore();
|
||||
|
||||
ref<EvalState> getEvalState();
|
||||
|
||||
private:
|
||||
std::shared_ptr<Store> evalStore;
|
||||
|
||||
std::shared_ptr<EvalState> evalState;
|
||||
};
|
||||
|
||||
struct MixFlakeOptions : virtual Args, EvalCommand
|
||||
|
@ -105,6 +110,8 @@ enum class Realise {
|
|||
exists. */
|
||||
Derivation,
|
||||
/* Evaluate in dry-run mode. Postcondition: nothing. */
|
||||
// FIXME: currently unused, but could be revived if we can
|
||||
// evaluate derivations in-memory.
|
||||
Nothing
|
||||
};
|
||||
|
||||
|
@ -147,7 +154,7 @@ private:
|
|||
};
|
||||
|
||||
/* A command that operates on zero or more store paths. */
|
||||
struct RealisedPathsCommand : public InstallablesCommand
|
||||
struct BuiltPathsCommand : public InstallablesCommand
|
||||
{
|
||||
private:
|
||||
|
||||
|
@ -160,26 +167,26 @@ protected:
|
|||
|
||||
public:
|
||||
|
||||
RealisedPathsCommand(bool recursive = false);
|
||||
BuiltPathsCommand(bool recursive = false);
|
||||
|
||||
using StoreCommand::run;
|
||||
|
||||
virtual void run(ref<Store> store, std::vector<RealisedPath> paths) = 0;
|
||||
virtual void run(ref<Store> store, BuiltPaths && paths) = 0;
|
||||
|
||||
void run(ref<Store> store) override;
|
||||
|
||||
bool useDefaultInstallables() override { return !all; }
|
||||
};
|
||||
|
||||
struct StorePathsCommand : public RealisedPathsCommand
|
||||
struct StorePathsCommand : public BuiltPathsCommand
|
||||
{
|
||||
StorePathsCommand(bool recursive = false);
|
||||
|
||||
using RealisedPathsCommand::run;
|
||||
using BuiltPathsCommand::run;
|
||||
|
||||
virtual void run(ref<Store> store, std::vector<StorePath> storePaths) = 0;
|
||||
virtual void run(ref<Store> store, std::vector<StorePath> && storePaths) = 0;
|
||||
|
||||
void run(ref<Store> store, std::vector<RealisedPath> paths) override;
|
||||
void run(ref<Store> store, BuiltPaths && paths) override;
|
||||
};
|
||||
|
||||
/* A command that operates on exactly one store path. */
|
||||
|
@ -189,7 +196,7 @@ struct StorePathCommand : public StorePathsCommand
|
|||
|
||||
virtual void run(ref<Store> store, const StorePath & storePath) = 0;
|
||||
|
||||
void run(ref<Store> store, std::vector<StorePath> storePaths) override;
|
||||
void run(ref<Store> store, std::vector<StorePath> && storePaths) override;
|
||||
};
|
||||
|
||||
/* A helper class for registering commands globally. */
|
||||
|
@ -220,26 +227,37 @@ static RegisterCommand registerCommand2(std::vector<std::string> && name)
|
|||
return RegisterCommand(std::move(name), [](){ return make_ref<T>(); });
|
||||
}
|
||||
|
||||
DerivedPathsWithHints build(ref<Store> store, Realise mode,
|
||||
std::vector<std::shared_ptr<Installable>> installables, BuildMode bMode = bmNormal);
|
||||
BuiltPaths build(
|
||||
ref<Store> evalStore,
|
||||
ref<Store> store, Realise mode,
|
||||
const std::vector<std::shared_ptr<Installable>> & installables,
|
||||
BuildMode bMode = bmNormal);
|
||||
|
||||
std::set<StorePath> toStorePaths(ref<Store> store,
|
||||
Realise mode, OperateOn operateOn,
|
||||
std::vector<std::shared_ptr<Installable>> installables);
|
||||
|
||||
StorePath toStorePath(ref<Store> store,
|
||||
Realise mode, OperateOn operateOn,
|
||||
std::shared_ptr<Installable> installable);
|
||||
|
||||
std::set<StorePath> toDerivations(ref<Store> store,
|
||||
std::vector<std::shared_ptr<Installable>> installables,
|
||||
bool useDeriver = false);
|
||||
|
||||
std::set<RealisedPath> toRealisedPaths(
|
||||
std::set<StorePath> toStorePaths(
|
||||
ref<Store> evalStore,
|
||||
ref<Store> store,
|
||||
Realise mode,
|
||||
OperateOn operateOn,
|
||||
std::vector<std::shared_ptr<Installable>> installables);
|
||||
const std::vector<std::shared_ptr<Installable>> & installables);
|
||||
|
||||
StorePath toStorePath(
|
||||
ref<Store> evalStore,
|
||||
ref<Store> store,
|
||||
Realise mode,
|
||||
OperateOn operateOn,
|
||||
std::shared_ptr<Installable> installable);
|
||||
|
||||
std::set<StorePath> toDerivations(
|
||||
ref<Store> store,
|
||||
const std::vector<std::shared_ptr<Installable>> & installables,
|
||||
bool useDeriver = false);
|
||||
|
||||
BuiltPaths toBuiltPaths(
|
||||
ref<Store> evalStore,
|
||||
ref<Store> store,
|
||||
Realise mode,
|
||||
OperateOn operateOn,
|
||||
const std::vector<std::shared_ptr<Installable>> & installables);
|
||||
|
||||
/* Helper function to generate args that invoke $EDITOR on
|
||||
filename:lineno. */
|
||||
|
@ -256,7 +274,7 @@ struct MixProfile : virtual StoreCommand
|
|||
|
||||
/* If 'profile' is set, make it point at the store path produced
|
||||
by 'buildables'. */
|
||||
void updateProfile(const DerivedPathsWithHints & buildables);
|
||||
void updateProfile(const BuiltPaths & buildables);
|
||||
};
|
||||
|
||||
struct MixDefaultProfile : MixProfile
|
||||
|
|
|
@ -58,9 +58,13 @@ MixFlakeOptions::MixFlakeOptions()
|
|||
|
||||
addFlag({
|
||||
.longName = "no-registries",
|
||||
.description = "Don't allow lookups in the flake registries.",
|
||||
.description =
|
||||
"Don't allow lookups in the flake registries. This option is deprecated; use `--no-use-registries`.",
|
||||
.category = category,
|
||||
.handler = {&lockFlags.useRegistries, false}
|
||||
.handler = {[&]() {
|
||||
lockFlags.useRegistries = false;
|
||||
warn("'--no-registries' is deprecated; use '--no-use-registries'");
|
||||
}}
|
||||
});
|
||||
|
||||
addFlag({
|
||||
|
@@ -171,14 +175,50 @@ Strings SourceExprCommand::getDefaultFlakeAttrPathPrefixes()

void SourceExprCommand::completeInstallable(std::string_view prefix)
{
if (file) return; // FIXME
if (file) {
evalSettings.pureEval = false;
auto state = getEvalState();
Expr *e = state->parseExprFromFile(
resolveExprPath(state->checkSourcePath(lookupFileArg(*state, *file)))
);

completeFlakeRefWithFragment(
getEvalState(),
lockFlags,
getDefaultFlakeAttrPathPrefixes(),
getDefaultFlakeAttrPaths(),
prefix);
Value root;
state->eval(e, root);

auto autoArgs = getAutoArgs(*state);

std::string prefix_ = std::string(prefix);
auto sep = prefix_.rfind('.');
std::string searchWord;
if (sep != std::string::npos) {
searchWord = prefix_.substr(sep, std::string::npos);
prefix_ = prefix_.substr(0, sep);
} else {
searchWord = prefix_;
prefix_ = "";
}

Value &v1(*findAlongAttrPath(*state, prefix_, *autoArgs, root).first);
state->forceValue(v1);
Value v2;
state->autoCallFunction(*autoArgs, v1, v2);

if (v2.type() == nAttrs) {
for (auto & i : *v2.attrs) {
std::string name = i.name;
if (name.find(searchWord) == 0) {
completions->add(i.name);
}
}
}
} else {
completeFlakeRefWithFragment(
getEvalState(),
lockFlags,
getDefaultFlakeAttrPathPrefixes(),
getDefaultFlakeAttrPaths(),
prefix);
}
}
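
The completion code in this hunk splits the typed prefix at its last `.` into an enclosing attribute path and a search word. A small standalone sketch of that split, with a hypothetical function name, purely for illustration:

```cpp
#include <string>
#include <utility>

// Split "foo.bar.ba" into {"foo.bar", ".ba"}; as in the hunk above, the
// search word keeps its leading dot, and a prefix without a dot becomes
// {"", prefix}.
static std::pair<std::string, std::string> splitCompletionPrefix(const std::string & prefix)
{
    auto sep = prefix.rfind('.');
    if (sep == std::string::npos)
        return {"", prefix};
    return {prefix.substr(0, sep), prefix.substr(sep)};
}
```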
|
||||
|
||||
void completeFlakeRefWithFragment(
|
||||
|
@ -249,7 +289,6 @@ void completeFlakeRefWithFragment(
|
|||
completeFlakeRef(evalState->store, prefix);
|
||||
}
|
||||
|
||||
|
||||
void completeFlakeRef(ref<Store> store, std::string_view prefix)
|
||||
{
|
||||
if (prefix == "")
|
||||
|
@ -273,9 +312,9 @@ void completeFlakeRef(ref<Store> store, std::string_view prefix)
|
|||
}
|
||||
}
|
||||
|
||||
DerivedPathWithHints Installable::toDerivedPathWithHints()
|
||||
DerivedPath Installable::toDerivedPath()
|
||||
{
|
||||
auto buildables = toDerivedPathsWithHints();
|
||||
auto buildables = toDerivedPaths();
|
||||
if (buildables.size() != 1)
|
||||
throw Error("installable '%s' evaluates to %d derivations, where only one is expected", what(), buildables.size());
|
||||
return std::move(buildables[0]);
|
||||
|
@ -309,22 +348,19 @@ struct InstallableStorePath : Installable
|
|||
|
||||
std::string what() override { return store->printStorePath(storePath); }
|
||||
|
||||
DerivedPathsWithHints toDerivedPathsWithHints() override
|
||||
DerivedPaths toDerivedPaths() override
|
||||
{
|
||||
if (storePath.isDerivation()) {
|
||||
std::map<std::string, std::optional<StorePath>> outputs;
|
||||
auto drv = store->readDerivation(storePath);
|
||||
for (auto & [name, output] : drv.outputsAndOptPaths(*store))
|
||||
outputs.emplace(name, output.second);
|
||||
return {
|
||||
DerivedPathWithHints::Built {
|
||||
DerivedPath::Built {
|
||||
.drvPath = storePath,
|
||||
.outputs = std::move(outputs)
|
||||
.outputs = drv.outputNames(),
|
||||
}
|
||||
};
|
||||
} else {
|
||||
return {
|
||||
DerivedPathWithHints::Opaque {
|
||||
DerivedPath::Opaque {
|
||||
.path = storePath,
|
||||
}
|
||||
};
|
||||
|
@ -337,22 +373,24 @@ struct InstallableStorePath : Installable
|
|||
}
|
||||
};
|
||||
|
||||
DerivedPathsWithHints InstallableValue::toDerivedPathsWithHints()
|
||||
DerivedPaths InstallableValue::toDerivedPaths()
|
||||
{
|
||||
DerivedPathsWithHints res;
|
||||
DerivedPaths res;
|
||||
|
||||
std::map<StorePath, std::map<std::string, std::optional<StorePath>>> drvsToOutputs;
|
||||
std::map<StorePath, std::set<std::string>> drvsToOutputs;
|
||||
RealisedPath::Set drvsToCopy;
|
||||
|
||||
// Group by derivation, helps with .all in particular
|
||||
for (auto & drv : toDerivations()) {
|
||||
auto outputName = drv.outputName;
|
||||
if (outputName == "")
|
||||
throw Error("derivation '%s' lacks an 'outputName' attribute", state->store->printStorePath(drv.drvPath));
|
||||
drvsToOutputs[drv.drvPath].insert_or_assign(outputName, drv.outPath);
|
||||
drvsToOutputs[drv.drvPath].insert(outputName);
|
||||
drvsToCopy.insert(drv.drvPath);
|
||||
}
|
||||
|
||||
for (auto & i : drvsToOutputs)
|
||||
res.push_back(DerivedPathWithHints::Built { i.first, i.second });
|
||||
res.push_back(DerivedPath::Built { i.first, i.second });
|
||||
|
||||
return res;
|
||||
}
|
||||
|
@ -564,10 +602,10 @@ InstallableFlake::getCursors(EvalState & state)
|
|||
|
||||
std::shared_ptr<flake::LockedFlake> InstallableFlake::getLockedFlake() const
|
||||
{
|
||||
flake::LockFlags lockFlagsApplyConfig = lockFlags;
|
||||
lockFlagsApplyConfig.applyNixConfig = true;
|
||||
if (!_lockedFlake) {
|
||||
_lockedFlake = std::make_shared<flake::LockedFlake>(lockFlake(*state, flakeRef, lockFlags));
|
||||
_lockedFlake->flake.config.apply();
|
||||
// FIXME: send new config to the daemon.
|
||||
_lockedFlake = std::make_shared<flake::LockedFlake>(lockFlake(*state, flakeRef, lockFlagsApplyConfig));
|
||||
}
|
||||
return _lockedFlake;
|
||||
}
|
||||
|
@ -616,6 +654,17 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
|
|||
for (auto & s : ss) {
|
||||
std::exception_ptr ex;
|
||||
|
||||
if (s.find('/') != std::string::npos) {
|
||||
try {
|
||||
result.push_back(std::make_shared<InstallableStorePath>(store, store->followLinksToStorePath(s)));
|
||||
continue;
|
||||
} catch (BadStorePath &) {
|
||||
} catch (...) {
|
||||
if (!ex)
|
||||
ex = std::current_exception();
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
auto [flakeRef, fragment] = parseFlakeRefWithFragment(s, absPath("."));
|
||||
result.push_back(std::make_shared<InstallableFlake>(
|
||||
|
@ -630,25 +679,7 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
|
|||
ex = std::current_exception();
|
||||
}
|
||||
|
||||
if (s.find('/') != std::string::npos) {
|
||||
try {
|
||||
result.push_back(std::make_shared<InstallableStorePath>(store, store->followLinksToStorePath(s)));
|
||||
continue;
|
||||
} catch (BadStorePath &) {
|
||||
} catch (...) {
|
||||
if (!ex)
|
||||
ex = std::current_exception();
|
||||
}
|
||||
}
|
||||
|
||||
std::rethrow_exception(ex);
|
||||
|
||||
/*
|
||||
throw Error(
|
||||
pathExists(s)
|
||||
? "path '%s' is not a flake or a store path"
|
||||
: "don't know how to handle argument '%s'", s);
|
||||
*/
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -663,107 +694,121 @@ std::shared_ptr<Installable> SourceExprCommand::parseInstallable(
|
|||
return installables.front();
|
||||
}
|
||||
|
||||
DerivedPathsWithHints build(ref<Store> store, Realise mode,
|
||||
std::vector<std::shared_ptr<Installable>> installables, BuildMode bMode)
|
||||
BuiltPaths getBuiltPaths(ref<Store> evalStore, ref<Store> store, const DerivedPaths & hopefullyBuiltPaths)
|
||||
{
|
||||
if (mode == Realise::Nothing)
|
||||
settings.readOnlyMode = true;
|
||||
|
||||
DerivedPathsWithHints buildables;
|
||||
|
||||
std::vector<DerivedPath> pathsToBuild;
|
||||
|
||||
for (auto & i : installables) {
|
||||
for (auto & b : i->toDerivedPathsWithHints()) {
|
||||
std::visit(overloaded {
|
||||
[&](DerivedPathWithHints::Opaque bo) {
|
||||
pathsToBuild.push_back(bo);
|
||||
BuiltPaths res;
|
||||
for (const auto & b : hopefullyBuiltPaths)
|
||||
std::visit(
|
||||
overloaded{
|
||||
[&](const DerivedPath::Opaque & bo) {
|
||||
res.push_back(BuiltPath::Opaque{bo.path});
|
||||
},
|
||||
[&](DerivedPathWithHints::Built bfd) {
|
||||
StringSet outputNames;
|
||||
for (auto & output : bfd.outputs)
|
||||
outputNames.insert(output.first);
|
||||
pathsToBuild.push_back(
|
||||
DerivedPath::Built{bfd.drvPath, outputNames});
|
||||
},
|
||||
}, b.raw());
|
||||
buildables.push_back(std::move(b));
|
||||
}
|
||||
}
|
||||
|
||||
if (mode == Realise::Nothing)
|
||||
printMissing(store, pathsToBuild, lvlError);
|
||||
else if (mode == Realise::Outputs)
|
||||
store->buildPaths(pathsToBuild, bMode);
|
||||
|
||||
return buildables;
|
||||
}
|
||||
|
||||
std::set<RealisedPath> toRealisedPaths(
|
||||
ref<Store> store,
|
||||
Realise mode,
|
||||
OperateOn operateOn,
|
||||
std::vector<std::shared_ptr<Installable>> installables)
|
||||
{
|
||||
std::set<RealisedPath> res;
|
||||
if (operateOn == OperateOn::Output) {
|
||||
for (auto & b : build(store, mode, installables))
|
||||
std::visit(overloaded {
|
||||
[&](DerivedPathWithHints::Opaque bo) {
|
||||
res.insert(bo.path);
|
||||
},
|
||||
[&](DerivedPathWithHints::Built bfd) {
|
||||
auto drv = store->readDerivation(bfd.drvPath);
|
||||
auto outputHashes = staticOutputHashes(*store, drv);
|
||||
[&](const DerivedPath::Built & bfd) {
|
||||
OutputPathMap outputs;
|
||||
auto drv = evalStore->readDerivation(bfd.drvPath);
|
||||
auto outputHashes = staticOutputHashes(*evalStore, drv); // FIXME: expensive
|
||||
auto drvOutputs = drv.outputsAndOptPaths(*store);
|
||||
for (auto & output : bfd.outputs) {
|
||||
if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
|
||||
if (!outputHashes.count(output.first))
|
||||
throw Error(
|
||||
"the derivation '%s' doesn't have an output named '%s'",
|
||||
store->printStorePath(bfd.drvPath),
|
||||
output.first);
|
||||
auto outputId = DrvOutput{outputHashes.at(output.first), output.first};
|
||||
auto realisation = store->queryRealisation(outputId);
|
||||
if (!outputHashes.count(output))
|
||||
throw Error(
|
||||
"the derivation '%s' doesn't have an output named '%s'",
|
||||
store->printStorePath(bfd.drvPath), output);
|
||||
if (settings.isExperimentalFeatureEnabled(
|
||||
Xp::CaDerivations)) {
|
||||
auto outputId =
|
||||
DrvOutput{outputHashes.at(output), output};
|
||||
auto realisation =
|
||||
store->queryRealisation(outputId);
|
||||
if (!realisation)
|
||||
throw Error("cannot operate on an output of unbuilt content-addresed derivation '%s'", outputId.to_string());
|
||||
res.insert(RealisedPath{*realisation});
|
||||
}
|
||||
else {
|
||||
// If ca-derivations isn't enabled, behave as if
|
||||
// all the paths are opaque to keep the default
|
||||
// behavior
|
||||
assert(output.second);
|
||||
res.insert(*output.second);
|
||||
throw Error(
|
||||
"cannot operate on an output of unbuilt "
|
||||
"content-addressed derivation '%s'",
|
||||
outputId.to_string());
|
||||
outputs.insert_or_assign(
|
||||
output, realisation->outPath);
|
||||
} else {
|
||||
// If ca-derivations isn't enabled, assume that
|
||||
// the output path is statically known.
|
||||
assert(drvOutputs.count(output));
|
||||
assert(drvOutputs.at(output).second);
|
||||
outputs.insert_or_assign(
|
||||
output, *drvOutputs.at(output).second);
|
||||
}
|
||||
}
|
||||
res.push_back(BuiltPath::Built{bfd.drvPath, outputs});
|
||||
},
|
||||
}, b.raw());
|
||||
} else {
|
||||
if (mode == Realise::Nothing)
|
||||
settings.readOnlyMode = true;
|
||||
|
||||
auto drvPaths = toDerivations(store, installables, true);
|
||||
res.insert(drvPaths.begin(), drvPaths.end());
|
||||
}
|
||||
},
|
||||
b.raw());
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
StorePathSet toStorePaths(ref<Store> store,
|
||||
BuiltPaths build(
|
||||
ref<Store> evalStore,
|
||||
ref<Store> store,
|
||||
Realise mode,
|
||||
const std::vector<std::shared_ptr<Installable>> & installables,
|
||||
BuildMode bMode)
|
||||
{
|
||||
if (mode == Realise::Nothing)
|
||||
settings.readOnlyMode = true;
|
||||
|
||||
std::vector<DerivedPath> pathsToBuild;
|
||||
|
||||
for (auto & i : installables) {
|
||||
auto b = i->toDerivedPaths();
|
||||
pathsToBuild.insert(pathsToBuild.end(), b.begin(), b.end());
|
||||
}
|
||||
|
||||
if (mode == Realise::Nothing || mode == Realise::Derivation)
|
||||
printMissing(store, pathsToBuild, lvlError);
|
||||
else if (mode == Realise::Outputs)
|
||||
store->buildPaths(pathsToBuild, bMode, evalStore);
|
||||
|
||||
return getBuiltPaths(evalStore, store, pathsToBuild);
|
||||
}
|
||||
|
||||
BuiltPaths toBuiltPaths(
|
||||
ref<Store> evalStore,
|
||||
ref<Store> store,
|
||||
Realise mode,
|
||||
OperateOn operateOn,
|
||||
const std::vector<std::shared_ptr<Installable>> & installables)
|
||||
{
|
||||
if (operateOn == OperateOn::Output)
|
||||
return build(evalStore, store, mode, installables);
|
||||
else {
|
||||
if (mode == Realise::Nothing)
|
||||
settings.readOnlyMode = true;
|
||||
|
||||
BuiltPaths res;
|
||||
for (auto & drvPath : toDerivations(store, installables, true))
|
||||
res.push_back(BuiltPath::Opaque{drvPath});
|
||||
return res;
|
||||
}
|
||||
}
|
||||
|
||||
StorePathSet toStorePaths(
|
||||
ref<Store> evalStore,
|
||||
ref<Store> store,
|
||||
Realise mode, OperateOn operateOn,
|
||||
std::vector<std::shared_ptr<Installable>> installables)
|
||||
const std::vector<std::shared_ptr<Installable>> & installables)
|
||||
{
|
||||
StorePathSet outPaths;
|
||||
for (auto & path : toRealisedPaths(store, mode, operateOn, installables))
|
||||
outPaths.insert(path.path());
|
||||
for (auto & path : toBuiltPaths(evalStore, store, mode, operateOn, installables)) {
|
||||
auto thisOutPaths = path.outPaths();
|
||||
outPaths.insert(thisOutPaths.begin(), thisOutPaths.end());
|
||||
}
|
||||
return outPaths;
|
||||
}
|
||||
|
||||
StorePath toStorePath(ref<Store> store,
|
||||
StorePath toStorePath(
|
||||
ref<Store> evalStore,
|
||||
ref<Store> store,
|
||||
Realise mode, OperateOn operateOn,
|
||||
std::shared_ptr<Installable> installable)
|
||||
{
|
||||
auto paths = toStorePaths(store, mode, operateOn, {installable});
|
||||
auto paths = toStorePaths(evalStore, store, mode, operateOn, {installable});
|
||||
|
||||
if (paths.size() != 1)
|
||||
throw Error("argument '%s' should evaluate to one store path", installable->what());
|
||||
|
@ -771,15 +816,17 @@ StorePath toStorePath(ref<Store> store,
|
|||
return *paths.begin();
|
||||
}
|
||||
|
||||
StorePathSet toDerivations(ref<Store> store,
|
||||
std::vector<std::shared_ptr<Installable>> installables, bool useDeriver)
|
||||
StorePathSet toDerivations(
|
||||
ref<Store> store,
|
||||
const std::vector<std::shared_ptr<Installable>> & installables,
|
||||
bool useDeriver)
|
||||
{
|
||||
StorePathSet drvPaths;
|
||||
|
||||
for (auto & i : installables)
|
||||
for (auto & b : i->toDerivedPathsWithHints())
|
||||
for (const auto & i : installables)
|
||||
for (const auto & b : i->toDerivedPaths())
|
||||
std::visit(overloaded {
|
||||
[&](DerivedPathWithHints::Opaque bo) {
|
||||
[&](const DerivedPath::Opaque & bo) {
|
||||
if (!useDeriver)
|
||||
throw Error("argument '%s' did not evaluate to a derivation", i->what());
|
||||
auto derivers = store->queryValidDerivers(bo.path);
|
||||
|
@ -788,7 +835,7 @@ StorePathSet toDerivations(ref<Store> store,
|
|||
// FIXME: use all derivers?
|
||||
drvPaths.insert(*derivers.begin());
|
||||
},
|
||||
[&](DerivedPathWithHints::Built bfd) {
|
||||
[&](const DerivedPath::Built & bfd) {
|
||||
drvPaths.insert(bfd.drvPath);
|
||||
},
|
||||
}, b.raw());
|
||||
|
|
|
@ -23,17 +23,23 @@ struct App
|
|||
// FIXME: add args, sandbox settings, metadata, ...
|
||||
};
|
||||
|
||||
struct UnresolvedApp
|
||||
{
|
||||
App unresolved;
|
||||
App resolve(ref<Store> evalStore, ref<Store> store);
|
||||
};
|
||||
|
||||
struct Installable
|
||||
{
|
||||
virtual ~Installable() { }
|
||||
|
||||
virtual std::string what() = 0;
|
||||
|
||||
virtual DerivedPathsWithHints toDerivedPathsWithHints() = 0;
|
||||
virtual DerivedPaths toDerivedPaths() = 0;
|
||||
|
||||
DerivedPathWithHints toDerivedPathWithHints();
|
||||
DerivedPath toDerivedPath();
|
||||
|
||||
App toApp(EvalState & state);
|
||||
UnresolvedApp toApp(EvalState & state);
|
||||
|
||||
virtual std::pair<Value *, Pos> toValue(EvalState & state)
|
||||
{
|
||||
|
@ -74,7 +80,7 @@ struct InstallableValue : Installable
|
|||
|
||||
virtual std::vector<DerivationInfo> toDerivations() = 0;
|
||||
|
||||
DerivedPathsWithHints toDerivedPathsWithHints() override;
|
||||
DerivedPaths toDerivedPaths() override;
|
||||
};
|
||||
|
||||
struct InstallableFlake : InstallableValue
|
||||
|
|
|
@ -8,8 +8,9 @@ libcmd_SOURCES := $(wildcard $(d)/*.cc)
|
|||
|
||||
libcmd_CXXFLAGS += -I src/libutil -I src/libstore -I src/libexpr -I src/libmain -I src/libfetchers -I src/nix
|
||||
|
||||
libcmd_LDFLAGS = $(EDITLINE_LIBS) -llowdown
|
||||
# libcmd_LDFLAGS = $(EDITLINE_LIBS) -llowdown
|
||||
libcmd_LDFLAGS += -llowdown -pthread
|
||||
|
||||
libcmd_LIBS = libstore libutil libexpr libmain libfetchers libnix
|
||||
|
||||
$(eval $(call install-file-in, $(d)/nix-cmd.pc, $(prefix)/lib/pkgconfig, 0644))
|
||||
$(eval $(call install-file-in, $(d)/nix-cmd.pc, $(libdir)/pkgconfig, 0644))
|
||||
|
|
|
@ -12,7 +12,7 @@ std::string renderMarkdownToTerminal(std::string_view markdown)
|
|||
struct lowdown_opts opts {
|
||||
.type = LOWDOWN_TERM,
|
||||
.maxdepth = 20,
|
||||
.cols = std::min(getWindowSize().second, (unsigned short) 80),
|
||||
.cols = std::max(getWindowSize().second, (unsigned short) 80),
|
||||
.hmargin = 0,
|
||||
.vmargin = 0,
|
||||
.feat = LOWDOWN_COMMONMARK | LOWDOWN_FENCED | LOWDOWN_DEFLIST | LOWDOWN_TABLES,
|
||||
|
@ -25,7 +25,7 @@ std::string renderMarkdownToTerminal(std::string_view markdown)
|
|||
Finally freeDoc([&]() { lowdown_doc_free(doc); });
|
||||
|
||||
size_t maxn = 0;
|
||||
auto node = lowdown_doc_parse(doc, &maxn, markdown.data(), markdown.size());
|
||||
auto node = lowdown_doc_parse(doc, &maxn, markdown.data(), markdown.size(), nullptr);
|
||||
if (!node)
|
||||
throw Error("cannot parse Markdown document");
|
||||
Finally freeNode([&]() { lowdown_node_free(node); });
|
||||
|
@ -40,11 +40,11 @@ std::string renderMarkdownToTerminal(std::string_view markdown)
|
|||
throw Error("cannot allocate Markdown output buffer");
|
||||
Finally freeBuffer([&]() { lowdown_buf_free(buf); });
|
||||
|
||||
int rndr_res = lowdown_term_rndr(buf, nullptr, renderer, node);
|
||||
int rndr_res = lowdown_term_rndr(buf, renderer, node);
|
||||
if (!rndr_res)
|
||||
throw Error("allocation error while rendering Markdown");
|
||||
|
||||
return std::string(buf->data, buf->size);
|
||||
return filterANSIEscapes(std::string(buf->data, buf->size), !shouldANSI());
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -68,6 +68,7 @@ struct NixRepl
|
|||
StorePath getDerivationPath(Value & v);
|
||||
bool processLine(string line);
|
||||
void loadFile(const Path & path);
|
||||
void loadFlake(const std::string & flakeRef);
|
||||
void initEnv();
|
||||
void reloadFiles();
|
||||
void addAttrsToScope(Value & attrs);
|
||||
|
@ -104,6 +105,25 @@ NixRepl::~NixRepl()
|
|||
write_history(historyFile.c_str());
|
||||
}
|
||||
|
||||
string runNix(Path program, const Strings & args,
|
||||
const std::optional<std::string> & input = {})
|
||||
{
|
||||
auto subprocessEnv = getEnv();
|
||||
subprocessEnv["NIX_CONFIG"] = globalConfig.toKeyValue();
|
||||
|
||||
auto res = runProgram(RunOptions {
|
||||
.program = settings.nixBinDir+ "/" + program,
|
||||
.args = args,
|
||||
.environment = subprocessEnv,
|
||||
.input = input,
|
||||
});
|
||||
|
||||
if (!statusOk(res.first))
|
||||
throw ExecError(res.first, fmt("program '%1%' %2%", program, statusToString(res.first)));
|
||||
|
||||
return res.second;
|
||||
}
|
||||
|
||||
static NixRepl * curRepl; // ugly
|
||||
|
||||
static char * completionCallback(char * s, int *match) {
|
||||
|
@ -180,15 +200,14 @@ namespace {
|
|||
void NixRepl::mainLoop(const std::vector<std::string> & files)
|
||||
{
|
||||
string error = ANSI_RED "error:" ANSI_NORMAL " ";
|
||||
std::cout << "Welcome to Nix version " << nixVersion << ". Type :? for help." << std::endl << std::endl;
|
||||
notice("Welcome to Nix " + nixVersion + ". Type :? for help.\n");
|
||||
|
||||
if (!files.empty()) {
|
||||
for (auto & i : files)
|
||||
loadedFiles.push_back(i);
|
||||
|
||||
reloadFiles();
|
||||
if (!loadedFiles.empty()) std::cout << std::endl;
|
||||
}
|
||||
reloadFiles();
|
||||
if (!loadedFiles.empty()) notice("");
|
||||
|
||||
// Allow nix-repl specific settings in .inputrc
|
||||
rl_readline_name = "nix-repl";
|
||||
|
@ -378,6 +397,8 @@ bool NixRepl::processLine(string line)
|
|||
{
|
||||
if (line == "") return true;
|
||||
|
||||
_isInterrupted = false;
|
||||
|
||||
string command, arg;
|
||||
|
||||
if (line[0] == ':') {
|
||||
|
@ -397,9 +418,10 @@ bool NixRepl::processLine(string line)
|
|||
<< " <x> = <expr> Bind expression to variable\n"
|
||||
<< " :a <expr> Add attributes from resulting set to scope\n"
|
||||
<< " :b <expr> Build derivation\n"
|
||||
<< " :e <expr> Open the derivation in $EDITOR\n"
|
||||
<< " :e <expr> Open package or function in $EDITOR\n"
|
||||
<< " :i <expr> Build derivation, then install result into current profile\n"
|
||||
<< " :l <path> Load Nix expression and add it to scope\n"
|
||||
<< " :lf <ref> Load Nix flake and add it to scope\n"
|
||||
<< " :p <expr> Evaluate and print expression recursively\n"
|
||||
<< " :q Exit nix-repl\n"
|
||||
<< " :r Reload all files\n"
|
||||
|
@ -420,6 +442,10 @@ bool NixRepl::processLine(string line)
|
|||
loadFile(arg);
|
||||
}
|
||||
|
||||
else if (command == ":lf" || command == ":load-flake") {
|
||||
loadFlake(arg);
|
||||
}
|
||||
|
||||
else if (command == ":r" || command == ":reload") {
|
||||
state->resetFileCache();
|
||||
reloadFiles();
|
||||
|
@ -439,14 +465,17 @@ bool NixRepl::processLine(string line)
|
|||
pos = v.lambda.fun->pos;
|
||||
} else {
|
||||
// assume it's a derivation
|
||||
pos = findDerivationFilename(*state, v, arg);
|
||||
pos = findPackageFilename(*state, v, arg);
|
||||
}
|
||||
|
||||
// Open in EDITOR
|
||||
auto args = editorFor(pos);
|
||||
auto editor = args.front();
|
||||
args.pop_front();
|
||||
runProgram(editor, true, args);
|
||||
|
||||
// runProgram redirects stdout to a StringSink,
|
||||
// using runProgram2 to allow editors to display their UI
|
||||
runProgram2(RunOptions { .program = editor, .searchPath = true, .args = args });
|
||||
|
||||
// Reload right after exiting the editor
|
||||
state->resetFileCache();
|
||||
|
@ -456,16 +485,17 @@ bool NixRepl::processLine(string line)
|
|||
else if (command == ":t") {
|
||||
Value v;
|
||||
evalString(arg, v);
|
||||
std::cout << showType(v) << std::endl;
|
||||
logger->cout(showType(v));
|
||||
}
|
||||
|
||||
} else if (command == ":u") {
|
||||
else if (command == ":u") {
|
||||
Value v, f, result;
|
||||
evalString(arg, v);
|
||||
evalString("drv: (import <nixpkgs> {}).runCommand \"shell\" { buildInputs = [ drv ]; } \"\"", f);
|
||||
state->callFunction(f, v, result, Pos());
|
||||
|
||||
StorePath drvPath = getDerivationPath(result);
|
||||
runProgram(settings.nixBinDir + "/nix-shell", true, {state->store->printStorePath(drvPath)});
|
||||
runNix("nix-shell", {state->store->printStorePath(drvPath)});
|
||||
}
|
||||
|
||||
else if (command == ":b" || command == ":i" || command == ":s") {
|
||||
|
@ -475,21 +505,15 @@ bool NixRepl::processLine(string line)
|
|||
Path drvPathRaw = state->store->printStorePath(drvPath);
|
||||
|
||||
if (command == ":b") {
|
||||
/* We could do the build in this process using buildPaths(),
|
||||
but doing it in a child makes it easier to recover from
|
||||
problems / SIGINT. */
|
||||
try {
|
||||
runProgram(settings.nixBinDir + "/nix", true, {"build", "--no-link", drvPathRaw});
|
||||
auto drv = state->store->readDerivation(drvPath);
|
||||
std::cout << std::endl << "this derivation produced the following outputs:" << std::endl;
|
||||
for (auto & i : drv.outputsAndOptPaths(*state->store))
|
||||
std::cout << fmt(" %s -> %s\n", i.first, state->store->printStorePath(*i.second.second));
|
||||
} catch (ExecError &) {
|
||||
}
|
||||
state->store->buildPaths({DerivedPath::Built{drvPath}});
|
||||
auto drv = state->store->readDerivation(drvPath);
|
||||
logger->cout("\nThis derivation produced the following outputs:");
|
||||
for (auto & [outputName, outputPath] : state->store->queryDerivationOutputMap(drvPath))
|
||||
logger->cout(" %s -> %s", outputName, state->store->printStorePath(outputPath));
|
||||
} else if (command == ":i") {
|
||||
runProgram(settings.nixBinDir + "/nix-env", true, {"-i", drvPathRaw});
|
||||
runNix("nix-env", {"-i", drvPathRaw});
|
||||
} else {
|
||||
runProgram(settings.nixBinDir + "/nix-shell", true, {drvPathRaw});
|
||||
runNix("nix-shell", {drvPathRaw});
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -518,9 +542,9 @@ bool NixRepl::processLine(string line)
|
|||
+ concatStringsSep(" ", args) + "\n\n";
|
||||
}
|
||||
|
||||
markdown += trim(stripIndentation(doc->doc));
|
||||
markdown += stripIndentation(doc->doc);
|
||||
|
||||
std::cout << renderMarkdownToTerminal(markdown);
|
||||
logger->cout(trim(renderMarkdownToTerminal(markdown)));
|
||||
} else
|
||||
throw Error("value does not have documentation");
|
||||
}
|
||||
|
@ -561,6 +585,25 @@ void NixRepl::loadFile(const Path & path)
|
|||
addAttrsToScope(v2);
|
||||
}
|
||||
|
||||
void NixRepl::loadFlake(const std::string & flakeRefS)
|
||||
{
|
||||
auto flakeRef = parseFlakeRef(flakeRefS, absPath("."), true);
|
||||
if (evalSettings.pureEval && !flakeRef.input.isImmutable())
|
||||
throw Error("cannot use ':load-flake' on mutable flake reference '%s' (use --impure to override)", flakeRefS);
|
||||
|
||||
Value v;
|
||||
|
||||
flake::callFlake(*state,
|
||||
flake::lockFlake(*state, flakeRef,
|
||||
flake::LockFlags {
|
||||
.updateLockFile = false,
|
||||
.useRegistries = !evalSettings.pureEval,
|
||||
.allowMutable = !evalSettings.pureEval,
|
||||
}),
|
||||
v);
|
||||
addAttrsToScope(v);
|
||||
}
|
||||
|
||||
|
||||
void NixRepl::initEnv()
|
||||
{
|
||||
|
@ -584,9 +627,9 @@ void NixRepl::reloadFiles()
|
|||
|
||||
bool first = true;
|
||||
for (auto & i : old) {
|
||||
if (!first) std::cout << std::endl;
|
||||
if (!first) notice("");
|
||||
first = false;
|
||||
std::cout << format("Loading '%1%'...") % i << std::endl;
|
||||
notice("Loading '%1%'...", i);
|
||||
loadFile(i);
|
||||
}
|
||||
}
|
||||
|
@ -596,8 +639,8 @@ void NixRepl::addAttrsToScope(Value & attrs)
|
|||
{
|
||||
state->forceAttrs(attrs);
|
||||
for (auto & i : *attrs.attrs)
|
||||
addVarToScope(i.name, i.value);
|
||||
std::cout << format("Added %1% variables.") % attrs.attrs->size() << std::endl;
|
||||
addVarToScope(i.name, *i.value);
|
||||
notice("Added %1% variables.", attrs.attrs->size());
|
||||
}
|
||||
|
||||
|
||||
|
@@ -605,8 +648,9 @@ void NixRepl::addVarToScope(const Symbol & name, Value * v)
{
if (displ >= envSize)
throw Error("environment full; cannot add more variables");
staticEnv->vars[name] = displ;
env->values[displ++] = v;
staticEnv->vars.emplace_back(name, displ);
staticEnv->sort();
env->values[displ++] = &v;
varNames.insert((string) name);
}
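
The hunk above switches the static environment from map-style insertion to appending to a vector and re-sorting. A toy sketch of that scheme (types and names hypothetical, not the actual StaticEnv), assuming lookups use a binary search over the sorted vector:

```cpp
#include <algorithm>
#include <string>
#include <utility>
#include <vector>

// A toy sorted-vector symbol table: append + sort on insert, binary search on lookup.
struct ToyEnv {
    std::vector<std::pair<std::string, int>> vars;

    void add(std::string name, int displ) {
        vars.emplace_back(std::move(name), displ);
        std::sort(vars.begin(), vars.end());   // keep ordered, like staticEnv->sort() above
    }

    const int * find(const std::string & name) const {
        auto it = std::lower_bound(vars.begin(), vars.end(), std::make_pair(name, 0));
        if (it != vars.end() && it->first == name) return &it->second;
        return nullptr;
    }
};
```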
|
||||
|
||||
|
@ -665,7 +709,7 @@ std::ostream & NixRepl::printValue(std::ostream & str, Value & v, unsigned int m
|
|||
break;
|
||||
|
||||
case nString:
|
||||
str << ANSI_YELLOW;
|
||||
str << ANSI_WARNING;
|
||||
printStringValue(str, v.string.s);
|
||||
str << ANSI_NORMAL;
|
||||
break;
|
||||
|
|
|
@ -19,7 +19,7 @@ static Strings parseAttrPath(std::string_view s)
|
|||
++i;
|
||||
while (1) {
|
||||
if (i == s.end())
|
||||
throw Error("missing closing quote in selection path '%1%'", s);
|
||||
throw ParseError("missing closing quote in selection path '%1%'", s);
|
||||
if (*i == '"') break;
|
||||
cur.push_back(*i++);
|
||||
}
|
||||
|
@ -100,7 +100,7 @@ std::pair<Value *, Pos> findAlongAttrPath(EvalState & state, const string & attr
|
|||
}
|
||||
|
||||
|
||||
Pos findDerivationFilename(EvalState & state, Value & v, std::string what)
|
||||
Pos findPackageFilename(EvalState & state, Value & v, std::string what)
|
||||
{
|
||||
Value * v2;
|
||||
try {
|
||||
|
@ -116,14 +116,14 @@ Pos findDerivationFilename(EvalState & state, Value & v, std::string what)
|
|||
|
||||
auto colon = pos.rfind(':');
|
||||
if (colon == std::string::npos)
|
||||
throw Error("cannot parse meta.position attribute '%s'", pos);
|
||||
throw ParseError("cannot parse meta.position attribute '%s'", pos);
|
||||
|
||||
std::string filename(pos, 0, colon);
|
||||
unsigned int lineno;
|
||||
try {
|
||||
lineno = std::stoi(std::string(pos, colon + 1));
|
||||
} catch (std::invalid_argument & e) {
|
||||
throw Error("cannot parse line number '%s'", pos);
|
||||
throw ParseError("cannot parse line number '%s'", pos);
|
||||
}
|
||||
|
||||
Symbol file = state.symbols.create(filename);
|
||||
|
|
|
@ -14,7 +14,7 @@ std::pair<Value *, Pos> findAlongAttrPath(EvalState & state, const string & attr
|
|||
Bindings & autoArgs, Value & vIn);
|
||||
|
||||
/* Heuristic to find the filename and lineno or a nix value. */
|
||||
Pos findDerivationFilename(EvalState & state, Value & v, std::string what);
|
||||
Pos findPackageFilename(EvalState & state, Value & v, std::string what);
|
||||
|
||||
std::vector<Symbol> parseAttrPath(EvalState & state, std::string_view s);
|
||||
|
||||
|
|
|
@ -17,8 +17,8 @@ struct Attr
|
|||
{
|
||||
Symbol name;
|
||||
Value * value;
|
||||
Pos * pos;
|
||||
Attr(Symbol name, Value * value, Pos * pos = &noPos)
|
||||
ptr<Pos> pos;
|
||||
Attr(Symbol name, Value * value, ptr<Pos> pos = ptr(&noPos))
|
||||
: name(name), value(value), pos(pos) { };
|
||||
Attr() : pos(&noPos) { };
|
||||
bool operator < (const Attr & a) const
|
||||
|
@ -35,13 +35,13 @@ class Bindings
|
|||
{
|
||||
public:
|
||||
typedef uint32_t size_t;
|
||||
Pos *pos;
|
||||
ptr<Pos> pos;
|
||||
|
||||
private:
|
||||
size_t size_, capacity_;
|
||||
Attr attrs[0];
|
||||
|
||||
Bindings(size_t capacity) : size_(0), capacity_(capacity) { }
|
||||
Bindings(size_t capacity) : pos(&noPos), size_(0), capacity_(capacity) { }
|
||||
Bindings(const Bindings & bindings) = delete;
|
||||
|
||||
public:
|
||||
|
|
|
@ -61,6 +61,14 @@ MixEvalArgs::MixEvalArgs()
|
|||
fetchers::overrideRegistry(from.input, to.input, extraAttrs);
|
||||
}}
|
||||
});
|
||||
|
||||
addFlag({
|
||||
.longName = "eval-store",
|
||||
.description = "The Nix store to use for evaluations.",
|
||||
.category = category,
|
||||
.labels = {"store-url"},
|
||||
.handler = {&evalStoreUrl},
|
||||
});
|
||||
}
|
||||
|
||||
Bindings * MixEvalArgs::getAutoArgs(EvalState & state)
|
||||
|
|
|
@ -16,8 +16,9 @@ struct MixEvalArgs : virtual Args
|
|||
|
||||
Strings searchPath;
|
||||
|
||||
private:
|
||||
std::optional<std::string> evalStoreUrl;
|
||||
|
||||
private:
|
||||
std::map<std::string, std::string> autoArgs;
|
||||
};
|
||||
|
||||
|
|
|
@ -65,7 +65,11 @@ static char * dupStringWithLen(const char * s, size_t size)
|
|||
|
||||
RootValue allocRootValue(Value * v)
|
||||
{
|
||||
#if HAVE_BOEHMGC
|
||||
return std::allocate_shared<Value *>(traceable_allocator<Value *>(), v);
|
||||
#else
|
||||
return std::make_shared<Value *>(v);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
|
@@ -234,22 +238,34 @@ static void * oomHandler(size_t requested)
}

class BoehmGCStackAllocator : public StackAllocator {
boost::coroutines2::protected_fixedsize_stack stack {
// We allocate 8 MB, the default max stack size on NixOS.
// A smaller stack might be quicker to allocate but reduces the stack
// depth available for source filter expressions etc.
std::max(boost::context::stack_traits::default_size(), static_cast<std::size_t>(8 * 1024 * 1024))
boost::coroutines2::protected_fixedsize_stack stack {
// We allocate 8 MB, the default max stack size on NixOS.
// A smaller stack might be quicker to allocate but reduces the stack
// depth available for source filter expressions etc.
std::max(boost::context::stack_traits::default_size(), static_cast<std::size_t>(8 * 1024 * 1024))
};

// This is specific to boost::coroutines2::protected_fixedsize_stack.
// The stack protection page is included in sctx.size, so we have to
// subtract one page size from the stack size.
std::size_t pfss_usable_stack_size(boost::context::stack_context &sctx) {
return sctx.size - boost::context::stack_traits::page_size();
}

public:
boost::context::stack_context allocate() override {
auto sctx = stack.allocate();
GC_add_roots(static_cast<char *>(sctx.sp) - sctx.size, sctx.sp);

// Stacks generally start at a high address and grow to lower addresses.
// Architectures that do the opposite are rare; in fact so rare that
// boost_routine does not implement it.
// So we subtract the stack size.
GC_add_roots(static_cast<char *>(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp);
return sctx;
}

void deallocate(boost::context::stack_context sctx) override {
GC_remove_roots(static_cast<char *>(sctx.sp) - sctx.size, sctx.sp);
GC_remove_roots(static_cast<char *>(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp);
stack.deallocate(sctx);
}
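
The hunk above registers each coroutine stack with the Boehm GC and now excludes the guard page that protected_fixedsize_stack counts in sctx.size. A minimal sketch of the address arithmetic, using plain integers instead of Boost and an invented FakeStackContext type, purely to illustrate the bounds computed above:

```cpp
#include <cassert>
#include <cstddef>

// sp is the high end of a downward-growing stack; size includes one guard
// page, so the usable range is [sp - (size - page_size), sp).
struct FakeStackContext { void * sp; std::size_t size; };

static void usableRange(const FakeStackContext & sctx, std::size_t pageSize,
                         char ** low, char ** high)
{
    *high = static_cast<char *>(sctx.sp);
    *low  = *high - (sctx.size - pageSize);  // what pfss_usable_stack_size() computes
}

int main() {
    static char buffer[16384];
    FakeStackContext sctx{ buffer + sizeof(buffer), sizeof(buffer) };
    char *low, *high;
    usableRange(sctx, 4096, &low, &high);
    assert(high - low == 16384 - 4096);
}
```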
|
||||
|
||||
|
@@ -363,7 +379,10 @@ static Strings parseNixPath(const string & s)
}


EvalState::EvalState(const Strings & _searchPath, ref<Store> store)
EvalState::EvalState(
    const Strings & _searchPath,
    ref<Store> store,
    std::shared_ptr<Store> buildStore)
    : sWith(symbols.create("<with>"))
    , sOutPath(symbols.create("outPath"))
    , sDrvPath(symbols.create("drvPath"))

@@ -396,6 +415,7 @@ EvalState::EvalState(const Strings & _searchPath, ref<Store> store)
    , sEpsilon(symbols.create(""))
    , repair(NoRepair)
    , store(store)
    , buildStore(buildStore ? buildStore : store)
    , regexCache(makeRegexCache())
    , baseEnv(allocEnv(128))
    , staticBaseEnv(new StaticEnv(false, 0))

@@ -426,12 +446,12 @@ EvalState::EvalState(const Strings & _searchPath, ref<Store> store)
                StorePathSet closure;
                store->computeFSClosure(store->toStorePath(r.second).first, closure);
                for (auto & path : closure)
                    allowedPaths->insert(store->printStorePath(path));
                    allowPath(path);
            } catch (InvalidPath &) {
                allowedPaths->insert(r.second);
                allowPath(r.second);
            }
        } else
            allowedPaths->insert(r.second);
            allowPath(r.second);
    }
}
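The widened constructor lets callers pass a separate build store while older call sites keep passing a single store; the initializer `buildStore(buildStore ? buildStore : store)` falls back to the evaluation store when no build store is given. A hedged sketch of that defaulting pattern in plain C++ (standard smart pointers stand in for Nix's `ref<Store>`/`std::shared_ptr<Store>`, and the store URIs are illustrative):

```cpp
#include <iostream>
#include <memory>
#include <string>

// Stand-in type; Nix's real Store / ref<Store> are assumed, not reproduced.
struct Store { std::string uri; };

class EvalStateSketch {
    std::shared_ptr<Store> store;       // used to read sources and materialise .drv files
    std::shared_ptr<Store> buildStore;  // used to build; defaults to `store`
public:
    EvalStateSketch(std::shared_ptr<Store> store,
                    std::shared_ptr<Store> buildStore = nullptr)
        : store(store)
        , buildStore(buildStore ? buildStore : store)   // same fallback as in the diff
    {}

    const Store & builder() const { return *buildStore; }
};

int main()
{
    auto local  = std::make_shared<Store>(Store{"daemon"});
    auto remote = std::make_shared<Store>(Store{"ssh-ng://builder.example"});  // made-up URI

    EvalStateSketch sameStore(local);           // old-style call: one store for both roles
    EvalStateSketch splitStore(local, remote);  // evaluate locally, build elsewhere

    std::cout << sameStore.builder().uri << "\n";   // daemon
    std::cout << splitStore.builder().uri << "\n";  // ssh-ng://builder.example
}
```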
@@ -446,6 +466,35 @@ EvalState::~EvalState()
}


void EvalState::requireExperimentalFeatureOnEvaluation(
    const ExperimentalFeature & feature,
    const std::string_view fName,
    const Pos & pos)
{
    if (!settings.isExperimentalFeatureEnabled(feature)) {
        throw EvalError({
            .msg = hintfmt(
                "Cannot call '%2%' because experimental Nix feature '%1%' is disabled. You can enable it via '--extra-experimental-features %1%'.",
                feature,
                fName
            ),
            .errPos = pos
        });
    }
}

void EvalState::allowPath(const Path & path)
{
    if (allowedPaths)
        allowedPaths->insert(path);
}

void EvalState::allowPath(const StorePath & storePath)
{
    if (allowedPaths)
        allowedPaths->insert(store->toRealPath(storePath));
}

Path EvalState::checkSourcePath(const Path & path_)
{
    if (!allowedPaths) return path_;

@@ -472,7 +521,7 @@ Path EvalState::checkSourcePath(const Path & path_)
    }

    if (!found)
        throw RestrictedPathError("access to path '%1%' is forbidden in restricted mode", abspath);
        throw RestrictedPathError("access to absolute path '%1%' is forbidden in restricted mode", abspath);

    /* Resolve symlinks. */
    debug(format("checking access to '%s'") % abspath);

@@ -485,7 +534,7 @@ Path EvalState::checkSourcePath(const Path & path_)
        }
    }

    throw RestrictedPathError("access to path '%1%' is forbidden in restricted mode", path);
    throw RestrictedPathError("access to canonical path '%1%' is forbidden in restricted mode", path);
}
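`checkSourcePath` only admits paths that sit under one of the `allowedPaths` prefixes, and the reworded errors above now say whether the absolute or the symlink-resolved (canonical) form was rejected. A rough standalone sketch of the prefix test alone (my own simplification; the real implementation additionally resolves symlinks component by component):

```cpp
#include <iostream>
#include <set>
#include <string>

// Simplified stand-in for the allowed-prefix test in checkSourcePath():
// a path is admitted if it equals an allowed path or lives underneath one.
bool isAllowed(const std::string & path, const std::set<std::string> & allowedPaths)
{
    for (const auto & allowed : allowedPaths) {
        if (path == allowed) return true;
        // "/allowed/dir" admits "/allowed/dir/child" but not "/allowed/dirx".
        if (path.size() > allowed.size()
            && path.compare(0, allowed.size(), allowed) == 0
            && path[allowed.size()] == '/')
            return true;
    }
    return false;
}

int main()
{
    std::set<std::string> allowedPaths{"/nix/store/abc-src", "/etc/nix"};

    for (const std::string p : {"/nix/store/abc-src/default.nix",
                                "/nix/store/abc-srcx/evil.nix",
                                "/home/user/secret"})
        std::cout << p << " -> "
                  << (isAllowed(p, allowedPaths) ? "allowed" : "forbidden in restricted mode")
                  << "\n";
}
```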
@@ -535,14 +584,20 @@ Value * EvalState::addConstant(const string & name, Value & v)
{
    Value * v2 = allocValue();
    *v2 = v;
    staticBaseEnv->vars[symbols.create(name)] = baseEnvDispl;
    baseEnv.values[baseEnvDispl++] = v2;
    string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name;
    baseEnv.values[0]->attrs->push_back(Attr(symbols.create(name2), v2));
    addConstant(name, v2);
    return v2;
}


void EvalState::addConstant(const string & name, Value * v)
{
    staticBaseEnv.vars.emplace_back(symbols.create(name), baseEnvDispl);
    baseEnv.values[baseEnvDispl++] = v;
    string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name;
    baseEnv.values[0]->attrs->push_back(Attr(symbols.create(name2), v));
}


Value * EvalState::addPrimOp(const string & name,
    size_t arity, PrimOpFun primOp)
{

@@ -561,7 +616,7 @@ Value * EvalState::addPrimOp(const string & name,
    Value * v = allocValue();
    v->mkPrimOp(new PrimOp { .fun = primOp, .arity = arity, .name = sym });
    staticBaseEnv->vars[symbols.create(name)] = baseEnvDispl;
    staticBaseEnv->vars.emplace_back(symbols.create(name), baseEnvDispl);
    baseEnv.values[baseEnvDispl++] = v;
    baseEnv.values[0]->attrs->push_back(Attr(sym, v));
    return v;

@@ -587,7 +642,7 @@ Value * EvalState::addPrimOp(PrimOp && primOp)
    Value * v = allocValue();
    v->mkPrimOp(new PrimOp(std::move(primOp)));
    staticBaseEnv->vars[envName] = baseEnvDispl;
    staticBaseEnv->vars.emplace_back(envName, baseEnvDispl);
    baseEnv.values[baseEnvDispl++] = v;
    baseEnv.values[0]->attrs->push_back(Attr(primOp.name, v));
    return v;
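`addConstant` registers every constant twice: under its raw (usually `__`-prefixed) name in the static base environment, and under the stripped name inside the `builtins` attribute set kept at `baseEnv.values[0]`. A tiny sketch of just the name handling and double bookkeeping, using plain containers instead of Nix's `StaticEnv`/`Bindings` (illustrative only):

```cpp
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct ConstantTable {
    std::vector<std::pair<std::string, int>> staticEnv; // name -> displacement
    std::map<std::string, std::string> builtins;        // stripped name -> value

    void addConstant(const std::string & name, const std::string & value)
    {
        // 1. visible at top level under the raw name, e.g. "__currentSystem"
        staticEnv.emplace_back(name, static_cast<int>(staticEnv.size()));

        // 2. visible as an attribute of `builtins` without the "__" prefix
        std::string name2 = name.rfind("__", 0) == 0 ? name.substr(2) : name;
        builtins[name2] = value;
    }
};

int main()
{
    ConstantTable t;
    t.addConstant("__currentSystem", "x86_64-linux");
    t.addConstant("true", "true");

    for (auto & [name, displ] : t.staticEnv)
        std::cout << "static env: " << name << " @ " << displ << "\n";
    for (auto & [name, value] : t.builtins)
        std::cout << "builtins." << name << " = " << value << "\n";
}
```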
@ -861,7 +916,7 @@ void mkPath(Value & v, const char * s)
|
|||
|
||||
inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval)
|
||||
{
|
||||
for (size_t l = var.level; l; --l, env = env->up) ;
|
||||
for (auto l = var.level; l; --l, env = env->up) ;
|
||||
|
||||
if (!var.fromWith) return env->values[var.displ];
|
||||
|
||||
|
@ -875,7 +930,7 @@ inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval)
|
|||
}
|
||||
Bindings::iterator j = env->values[0]->attrs->find(var.name);
|
||||
if (j != env->values[0]->attrs->end()) {
|
||||
if (countCalls && j->pos) attrSelects[*j->pos]++;
|
||||
if (countCalls) attrSelects[*j->pos]++;
|
||||
return j->value;
|
||||
}
|
||||
if (!env->prevWith) {
|
||||
|
@ -886,18 +941,10 @@ inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval)
|
|||
}
|
||||
|
||||
|
||||
std::atomic<uint64_t> nrValuesFreed{0};
|
||||
|
||||
void finalizeValue(void * obj, void * data)
|
||||
{
|
||||
nrValuesFreed++;
|
||||
}
|
||||
|
||||
Value * EvalState::allocValue()
|
||||
{
|
||||
nrValues++;
|
||||
auto v = (Value *) allocBytes(sizeof(Value));
|
||||
//GC_register_finalizer_no_order(v, finalizeValue, nullptr, nullptr, nullptr);
|
||||
return v;
|
||||
}
|
||||
|
||||
|
@ -949,9 +996,9 @@ void EvalState::mkThunk_(Value & v, Expr * expr)
|
|||
}
|
||||
|
||||
|
||||
void EvalState::mkPos(Value & v, Pos * pos)
|
||||
void EvalState::mkPos(Value & v, ptr<Pos> pos)
|
||||
{
|
||||
if (pos && pos->file.set()) {
|
||||
if (pos->file.set()) {
|
||||
mkAttrs(v, 3);
|
||||
mkString(*allocAttr(v, sFile), pos->file);
|
||||
mkInt(*allocAttr(v, sLine), pos->line);
|
||||
|
@ -974,39 +1021,37 @@ Value * Expr::maybeThunk(EvalState & state, Env & env)
|
|||
}
|
||||
|
||||
|
||||
unsigned long nrAvoided = 0;
|
||||
|
||||
Value * ExprVar::maybeThunk(EvalState & state, Env & env)
|
||||
{
|
||||
Value * v = state.lookupVar(&env, *this, true);
|
||||
/* The value might not be initialised in the environment yet.
|
||||
In that case, ignore it. */
|
||||
if (v) { nrAvoided++; return v; }
|
||||
if (v) { state.nrAvoided++; return v; }
|
||||
return Expr::maybeThunk(state, env);
|
||||
}
|
||||
|
||||
|
||||
Value * ExprString::maybeThunk(EvalState & state, Env & env)
|
||||
{
|
||||
nrAvoided++;
|
||||
state.nrAvoided++;
|
||||
return &v;
|
||||
}
|
||||
|
||||
Value * ExprInt::maybeThunk(EvalState & state, Env & env)
|
||||
{
|
||||
nrAvoided++;
|
||||
state.nrAvoided++;
|
||||
return &v;
|
||||
}
|
||||
|
||||
Value * ExprFloat::maybeThunk(EvalState & state, Env & env)
|
||||
{
|
||||
nrAvoided++;
|
||||
state.nrAvoided++;
|
||||
return &v;
|
||||
}
|
||||
|
||||
Value * ExprPath::maybeThunk(EvalState & state, Env & env)
|
||||
{
|
||||
nrAvoided++;
|
||||
state.nrAvoided++;
|
||||
return &v;
|
||||
}
|
||||
|
||||
|
@@ -1021,38 +1066,23 @@ void EvalState::evalFile(const Path & path_, Value & v, bool mustBeTrivial)
        return;
    }

    Path path2 = resolveExprPath(path);
    if ((i = fileEvalCache.find(path2)) != fileEvalCache.end()) {
    Path resolvedPath = resolveExprPath(path);
    if ((i = fileEvalCache.find(resolvedPath)) != fileEvalCache.end()) {
        v = i->second;
        return;
    }

    printTalkative("evaluating file '%1%'", path2);
    printTalkative("evaluating file '%1%'", resolvedPath);
    Expr * e = nullptr;

    auto j = fileParseCache.find(path2);
    auto j = fileParseCache.find(resolvedPath);
    if (j != fileParseCache.end())
        e = j->second;

    if (!e)
        e = parseExprFromFile(checkSourcePath(path2));
        e = parseExprFromFile(checkSourcePath(resolvedPath));

    fileParseCache[path2] = e;

    try {
        // Enforce that 'flake.nix' is a direct attrset, not a
        // computation.
        if (mustBeTrivial &&
            !(dynamic_cast<ExprAttrs *>(e)))
            throw Error("file '%s' must be an attribute set", path);
        eval(e, v);
    } catch (Error & e) {
        addErrorTrace(e, "while evaluating the file '%1%':", path2);
        throw;
    }

    fileEvalCache[path2] = v;
    if (path != path2) fileEvalCache[path] = v;
    cacheFile(path, resolvedPath, e, v, mustBeTrivial);
}

@@ -1063,6 +1093,32 @@ void EvalState::resetFileCache()
}


void EvalState::cacheFile(
    const Path & path,
    const Path & resolvedPath,
    Expr * e,
    Value & v,
    bool mustBeTrivial)
{
    fileParseCache[resolvedPath] = e;

    try {
        // Enforce that 'flake.nix' is a direct attrset, not a
        // computation.
        if (mustBeTrivial &&
            !(dynamic_cast<ExprAttrs *>(e)))
            throw EvalError("file '%s' must be an attribute set", path);
        eval(e, v);
    } catch (Error & e) {
        addErrorTrace(e, "while evaluating the file '%1%':", resolvedPath);
        throw;
    }

    fileEvalCache[resolvedPath] = v;
    if (path != resolvedPath) fileEvalCache[path] = v;
}


void EvalState::eval(Expr * e, Value & v)
{
    e->eval(*this, baseEnv, v);
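`evalFile` now delegates its bookkeeping to `cacheFile`, which maintains two maps: `fileParseCache` (resolved path to parsed `Expr *`) and `fileEvalCache` (resolved path to evaluated value), and also records the result under the original, unresolved path so the next lookup hits without re-resolving. A minimal sketch of that two-level cache keyed on both names (standard containers only; the `resolve`/`evaluate` callbacks are placeholders, not Nix's parser or evaluator):

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <string>

struct FileCache {
    std::map<std::string, std::string> parseCache; // resolvedPath -> "parse tree"
    std::map<std::string, int> evalCache;          // path and resolvedPath -> result

    int evalFile(const std::string & path,
                 const std::function<std::string(const std::string &)> & resolve,
                 const std::function<int(const std::string &)> & evaluate)
    {
        if (auto i = evalCache.find(path); i != evalCache.end())
            return i->second;                       // fast path: original name

        auto resolvedPath = resolve(path);          // e.g. a directory -> its default.nix
        if (auto i = evalCache.find(resolvedPath); i != evalCache.end())
            return i->second;                       // fast path: resolved name

        parseCache[resolvedPath] = "parsed:" + resolvedPath;
        int v = evaluate(resolvedPath);

        evalCache[resolvedPath] = v;
        if (path != resolvedPath) evalCache[path] = v;  // same trick as cacheFile()
        return v;
    }
};

int main()
{
    FileCache cache;
    auto resolve = [](const std::string & p) { return p + "/default.nix"; };
    int calls = 0;
    auto evaluate = [&](const std::string &) { return ++calls; };

    std::cout << cache.evalFile("/src/flake", resolve, evaluate) << "\n"; // evaluates: 1
    std::cout << cache.evalFile("/src/flake", resolve, evaluate) << "\n"; // cached:    1
    std::cout << "evaluations: " << calls << "\n";                        // 1
}
```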
@ -1144,7 +1200,7 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
|
|||
/* The recursive attributes are evaluated in the new
|
||||
environment, while the inherited attributes are evaluated
|
||||
in the original environment. */
|
||||
size_t displ = 0;
|
||||
Displacement displ = 0;
|
||||
for (auto & i : attrs) {
|
||||
Value * vAttr;
|
||||
if (hasOverrides && !i.second.inherited) {
|
||||
|
@ -1153,7 +1209,7 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
|
|||
} else
|
||||
vAttr = i.second.e->maybeThunk(state, i.second.inherited ? env : env2);
|
||||
env2.values[displ++] = vAttr;
|
||||
v.attrs->push_back(Attr(i.first, vAttr, &i.second.pos));
|
||||
v.attrs->push_back(Attr(i.first, vAttr, ptr(&i.second.pos)));
|
||||
}
|
||||
|
||||
/* If the rec contains an attribute called `__overrides', then
|
||||
|
@ -1185,7 +1241,7 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
|
|||
|
||||
else
|
||||
for (auto & i : attrs)
|
||||
v.attrs->push_back(Attr(i.first, i.second.e->maybeThunk(state, env), &i.second.pos));
|
||||
v.attrs->push_back(Attr(i.first, i.second.e->maybeThunk(state, env), ptr(&i.second.pos)));
|
||||
|
||||
/* Dynamic attrs apply *after* rec and __overrides. */
|
||||
for (auto & i : dynamicAttrs) {
|
||||
|
@ -1203,11 +1259,11 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
|
|||
|
||||
i.valueExpr->setName(nameSym);
|
||||
/* Keep sorted order so find can catch duplicates */
|
||||
v.attrs->push_back(Attr(nameSym, i.valueExpr->maybeThunk(state, *dynamicEnv), &i.pos));
|
||||
v.attrs->push_back(Attr(nameSym, i.valueExpr->maybeThunk(state, *dynamicEnv), ptr(&i.pos)));
|
||||
v.attrs->sort(); // FIXME: inefficient
|
||||
}
|
||||
|
||||
v.attrs->pos = &pos;
|
||||
v.attrs->pos = ptr(&pos);
|
||||
}
|
||||
|
||||
|
||||
|
@ -1221,7 +1277,7 @@ void ExprLet::eval(EvalState & state, Env & env, Value & v)
|
|||
/* The recursive attributes are evaluated in the new environment,
|
||||
while the inherited attributes are evaluated in the original
|
||||
environment. */
|
||||
size_t displ = 0;
|
||||
Displacement displ = 0;
|
||||
for (auto & i : attrs->attrs)
|
||||
env2.values[displ++] = i.second.e->maybeThunk(state, i.second.inherited ? env : env2);
|
||||
|
||||
|
@ -1262,12 +1318,10 @@ static string showAttrPath(EvalState & state, Env & env, const AttrPath & attrPa
|
|||
}
|
||||
|
||||
|
||||
unsigned long nrLookups = 0;
|
||||
|
||||
void ExprSelect::eval(EvalState & state, Env & env, Value & v)
|
||||
{
|
||||
Value vTmp;
|
||||
Pos * pos2 = 0;
|
||||
ptr<Pos> pos2(&noPos);
|
||||
Value * vAttrs = &vTmp;
|
||||
|
||||
e->eval(state, env, vTmp);
|
||||
|
@ -1275,7 +1329,7 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v)
|
|||
try {
|
||||
|
||||
for (auto & i : attrPath) {
|
||||
nrLookups++;
|
||||
state.nrLookups++;
|
||||
Bindings::iterator j;
|
||||
Symbol name = getName(i, state, env);
|
||||
if (def) {
|
||||
|
@ -1293,13 +1347,13 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v)
|
|||
}
|
||||
vAttrs = j->value;
|
||||
pos2 = j->pos;
|
||||
if (state.countCalls && pos2) state.attrSelects[*pos2]++;
|
||||
if (state.countCalls) state.attrSelects[*pos2]++;
|
||||
}
|
||||
|
||||
state.forceValue(*vAttrs, ( pos2 != NULL ? *pos2 : this->pos ) );
|
||||
state.forceValue(*vAttrs, (*pos2 != noPos ? *pos2 : this->pos ) );
|
||||
|
||||
} catch (Error & e) {
|
||||
if (pos2 && pos2->file != state.sDerivationNix)
|
||||
if (*pos2 != noPos && pos2->file != state.sDerivationNix)
|
||||
addErrorTrace(e, *pos2, "while evaluating the attribute '%1%'",
|
||||
showAttrPath(state, env, attrPath));
|
||||
throw;
|
||||
|
@ -1340,160 +1394,183 @@ void ExprLambda::eval(EvalState & state, Env & env, Value & v)
|
|||
}
|
||||
|
||||
|
||||
void ExprApp::eval(EvalState & state, Env & env, Value & v)
|
||||
{
|
||||
/* FIXME: vFun prevents GCC from doing tail call optimisation. */
|
||||
Value vFun;
|
||||
e1->eval(state, env, vFun);
|
||||
state.callFunction(vFun, *(e2->maybeThunk(state, env)), v, pos);
|
||||
}
|
||||
|
||||
|
||||
void EvalState::callPrimOp(Value & fun, Value & arg, Value & v, const Pos & pos)
|
||||
{
|
||||
/* Figure out the number of arguments still needed. */
|
||||
size_t argsDone = 0;
|
||||
Value * primOp = &fun;
|
||||
while (primOp->isPrimOpApp()) {
|
||||
argsDone++;
|
||||
primOp = primOp->primOpApp.left;
|
||||
}
|
||||
assert(primOp->isPrimOp());
|
||||
auto arity = primOp->primOp->arity;
|
||||
auto argsLeft = arity - argsDone;
|
||||
|
||||
if (argsLeft == 1) {
|
||||
/* We have all the arguments, so call the primop. */
|
||||
|
||||
/* Put all the arguments in an array. */
|
||||
Value * vArgs[arity];
|
||||
auto n = arity - 1;
|
||||
vArgs[n--] = &arg;
|
||||
for (Value * arg = &fun; arg->isPrimOpApp(); arg = arg->primOpApp.left)
|
||||
vArgs[n--] = arg->primOpApp.right;
|
||||
|
||||
/* And call the primop. */
|
||||
nrPrimOpCalls++;
|
||||
if (countCalls) primOpCalls[primOp->primOp->name]++;
|
||||
primOp->primOp->fun(*this, pos, vArgs, v);
|
||||
} else {
|
||||
Value * fun2 = allocValue();
|
||||
*fun2 = fun;
|
||||
v.mkPrimOpApp(fun2, &arg);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void EvalState::callFunction(Value & fun, Value & arg, Value & v, const Pos & pos)
|
||||
void EvalState::callFunction(Value & fun, size_t nrArgs, Value * * args, Value & vRes, const Pos & pos)
|
||||
{
|
||||
auto trace = evalSettings.traceFunctionCalls ? std::make_unique<FunctionCallTrace>(pos) : nullptr;
|
||||
|
||||
forceValue(fun, pos);
|
||||
|
||||
if (fun.isPrimOp() || fun.isPrimOpApp()) {
|
||||
callPrimOp(fun, arg, v, pos);
|
||||
return;
|
||||
}
|
||||
Value vCur(fun);
|
||||
|
||||
if (fun.type() == nAttrs) {
|
||||
auto found = fun.attrs->find(sFunctor);
|
||||
if (found != fun.attrs->end()) {
|
||||
/* fun may be allocated on the stack of the calling function,
|
||||
* but for functors we may keep a reference, so heap-allocate
|
||||
* a copy and use that instead.
|
||||
*/
|
||||
auto & fun2 = *allocValue();
|
||||
fun2 = fun;
|
||||
/* !!! Should we use the attr pos here? */
|
||||
Value v2;
|
||||
callFunction(*found->value, fun2, v2, pos);
|
||||
return callFunction(v2, arg, v, pos);
|
||||
}
|
||||
}
|
||||
auto makeAppChain = [&]()
|
||||
{
|
||||
vRes = vCur;
|
||||
for (size_t i = 0; i < nrArgs; ++i) {
|
||||
auto fun2 = allocValue();
|
||||
*fun2 = vRes;
|
||||
vRes.mkPrimOpApp(fun2, args[i]);
|
||||
}
|
||||
};
|
||||
|
||||
if (!fun.isLambda()) {
|
||||
throwTypeError(
|
||||
pos,
|
||||
"attempt to call something which is not a function but %1%",
|
||||
showType(fun).c_str(),
|
||||
fakeEnv(1), 0);
|
||||
}
|
||||
Attr * functor;
|
||||
|
||||
ExprLambda & lambda(*fun.lambda.fun);
|
||||
while (nrArgs > 0) {
|
||||
|
||||
auto size =
|
||||
(lambda.arg.empty() ? 0 : 1) +
|
||||
(lambda.matchAttrs ? lambda.formals->formals.size() : 0);
|
||||
Env & env2(allocEnv(size));
|
||||
env2.up = fun.lambda.env;
|
||||
if (vCur.isLambda()) {
|
||||
|
||||
size_t displ = 0;
|
||||
ExprLambda & lambda(*vCur.lambda.fun);
|
||||
|
||||
if (!lambda.matchAttrs){
|
||||
env2.values[displ++] = &arg;
|
||||
}
|
||||
else {
|
||||
forceAttrs(arg, pos);
|
||||
auto size =
|
||||
(lambda.arg.empty() ? 0 : 1) +
|
||||
(lambda.hasFormals() ? lambda.formals->formals.size() : 0);
|
||||
Env & env2(allocEnv(size));
|
||||
env2.up = vCur.lambda.env;
|
||||
|
||||
if (!lambda.arg.empty())
|
||||
env2.values[displ++] = &arg;
|
||||
Displacement displ = 0;
|
||||
|
||||
/* For each formal argument, get the actual argument. If
|
||||
there is no matching actual argument but the formal
|
||||
argument has a default, use the default. */
|
||||
size_t attrsUsed = 0;
|
||||
for (auto & i : lambda.formals->formals) {
|
||||
Bindings::iterator j = arg.attrs->find(i.name);
|
||||
if (j == arg.attrs->end()) {
|
||||
if (!i.def)
|
||||
throwTypeError(
|
||||
pos,
|
||||
"%1% called without required argument '%2%'",
|
||||
lambda,
|
||||
i.name,
|
||||
*fun.lambda.env, &lambda);
|
||||
env2.values[displ++] = i.def->maybeThunk(*this, env2);
|
||||
if (!lambda.hasFormals())
|
||||
env2.values[displ++] = args[0];
|
||||
else {
|
||||
forceAttrs(*args[0], pos);
|
||||
|
||||
if (!lambda.arg.empty())
|
||||
env2.values[displ++] = args[0];
|
||||
|
||||
/* For each formal argument, get the actual argument. If
|
||||
there is no matching actual argument but the formal
|
||||
argument has a default, use the default. */
|
||||
size_t attrsUsed = 0;
|
||||
for (auto & i : lambda.formals->formals) {
|
||||
auto j = args[0]->attrs->get(i.name);
|
||||
if (!j) {
|
||||
if (!i.def) throwTypeError(pos, "%1% called without required argument '%2%'",
|
||||
lambda, i.name, *fun.lambda.env, &lambda);
|
||||
env2.values[displ++] = i.def->maybeThunk(*this, env2);
|
||||
} else {
|
||||
attrsUsed++;
|
||||
env2.values[displ++] = j->value;
|
||||
}
|
||||
}
|
||||
|
||||
/* Check that each actual argument is listed as a formal
|
||||
argument (unless the attribute match specifies a `...'). */
|
||||
if (!lambda.formals->ellipsis && attrsUsed != args[0]->attrs->size()) {
|
||||
/* Nope, so show the first unexpected argument to the
|
||||
user. */
|
||||
for (auto & i : *args[0]->attrs)
|
||||
if (lambda.formals->argNames.find(i.name) == lambda.formals->argNames.end())
|
||||
throwTypeError(pos, "%1% called with unexpected argument '%2%'", lambda, i.name);
|
||||
abort(); // can't happen
|
||||
}
|
||||
}
|
||||
|
||||
nrFunctionCalls++;
|
||||
if (countCalls) incrFunctionCall(&lambda);
|
||||
|
||||
/* Evaluate the body. */
|
||||
try {
|
||||
lambda.body->eval(*this, env2, vCur);
|
||||
} catch (Error & e) {
|
||||
if (loggerSettings.showTrace.get()) {
|
||||
addErrorTrace(e, lambda.pos, "while evaluating %s",
|
||||
(lambda.name.set()
|
||||
? "'" + (string) lambda.name + "'"
|
||||
: "anonymous lambda"));
|
||||
addErrorTrace(e, pos, "from call site%s", "");
|
||||
}
|
||||
throw;
|
||||
}
|
||||
|
||||
nrArgs--;
|
||||
args += 1;
|
||||
}
|
||||
|
||||
else if (vCur.isPrimOp()) {
|
||||
|
||||
size_t argsLeft = vCur.primOp->arity;
|
||||
|
||||
if (nrArgs < argsLeft) {
|
||||
/* We don't have enough arguments, so create a tPrimOpApp chain. */
|
||||
makeAppChain();
|
||||
return;
|
||||
} else {
|
||||
attrsUsed++;
|
||||
env2.values[displ++] = j->value;
|
||||
/* We have all the arguments, so call the primop. */
|
||||
nrPrimOpCalls++;
|
||||
if (countCalls) primOpCalls[vCur.primOp->name]++;
|
||||
vCur.primOp->fun(*this, pos, args, vCur);
|
||||
|
||||
nrArgs -= argsLeft;
|
||||
args += argsLeft;
|
||||
}
|
||||
}
|
||||
|
||||
else if (vCur.isPrimOpApp()) {
|
||||
/* Figure out the number of arguments still needed. */
|
||||
size_t argsDone = 0;
|
||||
Value * primOp = &vCur;
|
||||
while (primOp->isPrimOpApp()) {
|
||||
argsDone++;
|
||||
primOp = primOp->primOpApp.left;
|
||||
}
|
||||
assert(primOp->isPrimOp());
|
||||
auto arity = primOp->primOp->arity;
|
||||
auto argsLeft = arity - argsDone;
|
||||
|
||||
/* Check that each actual argument is listed as a formal
|
||||
argument (unless the attribute match specifies a `...'). */
|
||||
if (!lambda.formals->ellipsis && attrsUsed != arg.attrs->size()) {
|
||||
/* Nope, so show the first unexpected argument to the
|
||||
user. */
|
||||
for (auto & i : *arg.attrs)
|
||||
if (lambda.formals->argNames.find(i.name) == lambda.formals->argNames.end())
|
||||
throwTypeError(pos,
|
||||
"%1% called with unexpected argument '%2%'",
|
||||
lambda,
|
||||
i.name,
|
||||
*fun.lambda.env, &lambda);
|
||||
abort(); // can't happen
|
||||
if (nrArgs < argsLeft) {
|
||||
/* We still don't have enough arguments, so extend the tPrimOpApp chain. */
|
||||
makeAppChain();
|
||||
return;
|
||||
} else {
|
||||
/* We have all the arguments, so call the primop with
|
||||
the previous and new arguments. */
|
||||
|
||||
Value * vArgs[arity];
|
||||
auto n = argsDone;
|
||||
for (Value * arg = &vCur; arg->isPrimOpApp(); arg = arg->primOpApp.left)
|
||||
vArgs[--n] = arg->primOpApp.right;
|
||||
|
||||
for (size_t i = 0; i < argsLeft; ++i)
|
||||
vArgs[argsDone + i] = args[i];
|
||||
|
||||
nrPrimOpCalls++;
|
||||
if (countCalls) primOpCalls[primOp->primOp->name]++;
|
||||
primOp->primOp->fun(*this, pos, vArgs, vCur);
|
||||
|
||||
nrArgs -= argsLeft;
|
||||
args += argsLeft;
|
||||
}
|
||||
}
|
||||
|
||||
else if (vCur.type() == nAttrs && (functor = vCur.attrs->get(sFunctor))) {
|
||||
/* 'vCur' may be allocated on the stack of the calling
|
||||
function, but for functors we may keep a reference, so
|
||||
heap-allocate a copy and use that instead. */
|
||||
Value * args2[] = {allocValue(), args[0]};
|
||||
*args2[0] = vCur;
|
||||
/* !!! Should we use the attr pos here? */
|
||||
callFunction(*functor->value, 2, args2, vCur, pos);
|
||||
nrArgs--;
|
||||
args++;
|
||||
}
|
||||
|
||||
else
|
||||
throwTypeError(pos, "attempt to call something which is not a function but %1%", vCur);
|
||||
}
|
||||
|
||||
nrFunctionCalls++;
|
||||
if (countCalls) incrFunctionCall(&lambda);
|
||||
vRes = vCur;
|
||||
}
|
||||
|
||||
/* Evaluate the body. This is conditional on showTrace, because
|
||||
catching exceptions makes this function not tail-recursive. */
|
||||
if (loggerSettings.showTrace.get())
|
||||
try {
|
||||
lambda.body->eval(*this, env2, v);
|
||||
} catch (Error & e) {
|
||||
addErrorTrace(e, lambda.pos, "while evaluating %s",
|
||||
(lambda.name.set()
|
||||
? "'" + (string) lambda.name + "'"
|
||||
: "anonymous lambda"));
|
||||
addErrorTrace(e, pos, "from call site%s", "");
|
||||
throw;
|
||||
}
|
||||
else
|
||||
fun.lambda.fun->body->eval(*this, env2, v);
|
||||
|
||||
void ExprCall::eval(EvalState & state, Env & env, Value & v)
{
    Value vFun;
    fun->eval(state, env, vFun);

    Value * vArgs[args.size()];
    for (size_t i = 0; i < args.size(); ++i)
        vArgs[i] = args[i]->maybeThunk(state, env);

    state.callFunction(vFun, args.size(), vArgs, v, pos);
}

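`ExprCall` thunks every argument up front and hands the whole array to the new n-ary `callFunction`, which loops, consuming as many arguments as each lambda or primop needs and building a partial-application chain when there are too few. A toy model of that "collect until the arity is reached" behaviour (my own simplification; Nix's real `Value`/`PrimOp` machinery is assumed, not reproduced):

```cpp
#include <iostream>
#include <vector>

// Toy model of a primop plus a tPrimOpApp-style chain of pending arguments.
struct PrimOp {
    size_t arity;
    int (*fun)(const std::vector<int> &);
};

struct AppChain {
    const PrimOp * op;
    std::vector<int> pending;   // arguments applied so far
    bool done = false;
    int result = 0;
};

// Apply all of `args` in one loop, like the n-ary callFunction: once enough
// arguments have accumulated the primop fires; with fewer, the chain just grows.
AppChain call(AppChain app, const std::vector<int> & args)
{
    for (int a : args) {
        app.pending.push_back(a);
        if (app.pending.size() == app.op->arity) {
            app.result = app.op->fun(app.pending);
            app.done = true;
        }
    }
    return app;
}

int main()
{
    PrimOp add3{3, [](const std::vector<int> & xs) { return xs[0] + xs[1] + xs[2]; }};

    AppChain app{&add3, {}};
    app = call(app, {1, 2});          // only two of three arguments: still a chain
    std::cout << (app.done ? "done" : "partially applied") << "\n";

    app = call(app, {39});            // third argument arrives
    std::cout << app.result << "\n";  // 42
}
```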
@ -1519,7 +1596,7 @@ void EvalState::autoCallFunction(Bindings & args, Value & fun, Value & res)
|
|||
}
|
||||
}
|
||||
|
||||
if (!fun.isLambda() || !fun.lambda.fun->matchAttrs) {
|
||||
if (!fun.isLambda() || !fun.lambda.fun->hasFormals()) {
|
||||
res = fun;
|
||||
return;
|
||||
}
|
||||
|
@@ -1722,7 +1799,6 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v)
           and none of the strings are allowed to have contexts. */
        if (first) {
            firstType = vTmp.type();
            first = false;
        }

        if (firstType == nInt) {

@@ -1744,7 +1820,12 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v)
            } else
                throwEvalError(pos, "cannot add %1% to a float", showType(vTmp), env, this);
        } else
            s << state.coerceToString(pos, vTmp, context, false, firstType == nString);
            /* skip canonization of first path, which would only be not
               canonized in the first place if it's coming from a ./${foo} type
               path */
            s << state.coerceToString(pos, vTmp, context, false, firstType == nString, !first);

        first = false;
    }

    if (firstType == nInt)
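The extra `!first` argument keeps the first path fragment of an interpolation un-canonicalised, so only the fully concatenated result is cleaned up. To make the effect concrete, here is a small canonPath-style normaliser (a rough imitation, not Nix's `canonPath`, which also resolves symlinks) showing why cleaning the first fragment on its own would change the string being concatenated:

```cpp
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Rough imitation of a canonPath-style cleanup for absolute paths:
// collapses "//", drops ".", folds "..". (No symlink handling.)
std::string canonicalize(const std::string & path)
{
    std::vector<std::string> parts;
    std::stringstream ss(path);
    std::string item;
    while (std::getline(ss, item, '/')) {
        if (item.empty() || item == ".") continue;
        if (item == "..") { if (!parts.empty()) parts.pop_back(); continue; }
        parts.push_back(item);
    }
    std::string out;
    for (const auto & p : parts) out += "/" + p;
    return out.empty() ? "/" : out;
}

int main()
{
    // Canonicalising each fragment separately vs. the concatenated whole gives
    // different results, which is why the first fragment of a `./${foo}`-style
    // interpolation is left alone until the pieces have been joined.
    std::string first = "/work/src/./";   // illustrative fragment from a ./${foo} path
    std::string rest  = "subdir";

    std::cout << canonicalize(first) + rest << "\n";   // "/work/srcsubdir"  (broken join)
    std::cout << canonicalize(first + rest) << "\n";   // "/work/src/subdir"
}
```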
@ -1763,7 +1844,7 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v)
|
|||
|
||||
void ExprPos::eval(EvalState & state, Env & env, Value & v)
|
||||
{
|
||||
state.mkPos(v, &pos);
|
||||
state.mkPos(v, ptr(&pos));
|
||||
}
|
||||
|
||||
|
||||
|
@ -1935,7 +2016,7 @@ std::optional<string> EvalState::tryAttrsToString(const Pos & pos, Value & v,
|
|||
}
|
||||
|
||||
string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context,
|
||||
bool coerceMore, bool copyToStore)
|
||||
bool coerceMore, bool copyToStore, bool canonicalizePath)
|
||||
{
|
||||
forceValue(v, pos);
|
||||
|
||||
|
@ -1947,7 +2028,7 @@ string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context,
|
|||
}
|
||||
|
||||
if (v.type() == nPath) {
|
||||
Path path(canonPath(v.path));
|
||||
Path path(canonicalizePath ? canonPath(v.path) : v.path);
|
||||
return copyToStore ? copyPathToStore(context, path) : path;
|
||||
}
|
||||
|
||||
|
@ -2010,6 +2091,7 @@ string EvalState::copyPathToStore(PathSet & context, const Path & path)
|
|||
? store->computeStorePathForPath(std::string(baseNameOf(path)), checkSourcePath(path)).first
|
||||
: store->addToStore(std::string(baseNameOf(path)), checkSourcePath(path), FileIngestionMethod::Recursive, htSHA256, defaultPathFilter, repair);
|
||||
dstPath = store->printStorePath(p);
|
||||
allowPath(p);
|
||||
srcToStore.insert_or_assign(path, std::move(p));
|
||||
printMsg(lvlChatty, "copied source '%1%' -> '%2%'", path, dstPath);
|
||||
}
|
||||
|
|
|
@@ -5,6 +5,7 @@
#include "nixexpr.hh"
#include "symbol-table.hh"
#include "config.hh"
#include "experimental-features.hh"

#include <map>
#include <optional>

@@ -98,8 +99,14 @@ public:
    Value vEmptySet;

    /* Store used to materialise .drv files. */
    const ref<Store> store;

    /* Store used to build stuff. */
    const ref<Store> buildStore;

    RootValue vCallFlake = nullptr;
    RootValue vImportedDrvToDerivation = nullptr;

private:
    SrcToStore srcToStore;
@ -132,13 +139,31 @@ private:
|
|||
|
||||
public:
|
||||
|
||||
EvalState(const Strings & _searchPath, ref<Store> store);
|
||||
EvalState(
|
||||
const Strings & _searchPath,
|
||||
ref<Store> store,
|
||||
std::shared_ptr<Store> buildStore = nullptr);
|
||||
~EvalState();
|
||||
|
||||
void requireExperimentalFeatureOnEvaluation(
|
||||
const ExperimentalFeature &,
|
||||
const std::string_view fName,
|
||||
const Pos & pos
|
||||
);
|
||||
|
||||
void addToSearchPath(const string & s);
|
||||
|
||||
SearchPath getSearchPath() { return searchPath; }
|
||||
|
||||
/* Allow access to a path. */
|
||||
void allowPath(const Path & path);
|
||||
|
||||
/* Allow access to a store path. Note that this gets remapped to
|
||||
the real store path if `store` is a chroot store. */
|
||||
void allowPath(const StorePath & storePath);
|
||||
|
||||
/* Check whether access to a path is allowed and throw an error if
|
||||
not. Otherwise return the canonicalised path. */
|
||||
Path checkSourcePath(const Path & path);
|
||||
|
||||
void checkURI(const std::string & uri);
|
||||
|
@ -167,6 +192,14 @@ public:
|
|||
trivial (i.e. doesn't require arbitrary computation). */
|
||||
void evalFile(const Path & path, Value & v, bool mustBeTrivial = false);
|
||||
|
||||
/* Like `cacheFile`, but with an already parsed expression. */
|
||||
void cacheFile(
|
||||
const Path & path,
|
||||
const Path & resolvedPath,
|
||||
Expr * e,
|
||||
Value & v,
|
||||
bool mustBeTrivial = false);
|
||||
|
||||
void resetFileCache();
|
||||
|
||||
/* Look up a file in the search path. */
|
||||
|
@ -221,7 +254,8 @@ public:
|
|||
booleans and lists to a string. If `copyToStore' is set,
|
||||
referenced paths are copied to the Nix store as a side effect. */
|
||||
string coerceToString(const Pos & pos, Value & v, PathSet & context,
|
||||
bool coerceMore = false, bool copyToStore = true);
|
||||
bool coerceMore = false, bool copyToStore = true,
|
||||
bool canonicalizePath = true);
|
||||
|
||||
string copyPathToStore(PathSet & context, const Path & path);
|
||||
|
||||
|
@ -247,6 +281,8 @@ private:
|
|||
|
||||
Value * addConstant(const string & name, Value & v);
|
||||
|
||||
void addConstant(const string & name, Value * v);
|
||||
|
||||
Value * addPrimOp(const string & name,
|
||||
size_t arity, PrimOpFun primOp);
|
||||
|
||||
|
@ -286,8 +322,14 @@ public:
|
|||
|
||||
bool isFunctor(Value & fun);
|
||||
|
||||
void callFunction(Value & fun, Value & arg, Value & v, const Pos & pos);
|
||||
void callPrimOp(Value & fun, Value & arg, Value & v, const Pos & pos);
|
||||
// FIXME: use std::span
|
||||
void callFunction(Value & fun, size_t nrArgs, Value * * args, Value & vRes, const Pos & pos);
|
||||
|
||||
void callFunction(Value & fun, Value & arg, Value & vRes, const Pos & pos)
|
||||
{
|
||||
Value * args[] = {&arg};
|
||||
callFunction(fun, 1, args, vRes, pos);
|
||||
}
|
||||
|
||||
/* Automatically call a function for which each argument has a
|
||||
default value or has a binding in the `args' map. */
|
||||
|
@ -305,7 +347,7 @@ public:
|
|||
void mkList(Value & v, size_t length);
|
||||
void mkAttrs(Value & v, size_t capacity);
|
||||
void mkThunk_(Value & v, Expr * expr);
|
||||
void mkPos(Value & v, Pos * pos);
|
||||
void mkPos(Value & v, ptr<Pos> pos);
|
||||
|
||||
void concatLists(Value & v, size_t nrLists, Value * * lists, const Pos & pos);
|
||||
|
||||
|
@ -320,8 +362,10 @@ private:
|
|||
unsigned long nrValuesInEnvs = 0;
|
||||
unsigned long nrValues = 0;
|
||||
unsigned long nrListElems = 0;
|
||||
unsigned long nrLookups = 0;
|
||||
unsigned long nrAttrsets = 0;
|
||||
unsigned long nrAttrsInAttrsets = 0;
|
||||
unsigned long nrAvoided = 0;
|
||||
unsigned long nrOpUpdates = 0;
|
||||
unsigned long nrOpUpdateValuesCopied = 0;
|
||||
unsigned long nrListConcats = 0;
|
||||
|
@ -343,6 +387,11 @@ private:
|
|||
|
||||
friend struct ExprOpUpdate;
|
||||
friend struct ExprOpConcatLists;
|
||||
friend struct ExprVar;
|
||||
friend struct ExprString;
|
||||
friend struct ExprInt;
|
||||
friend struct ExprFloat;
|
||||
friend struct ExprPath;
|
||||
friend struct ExprSelect;
|
||||
friend void prim_getAttr(EvalState & state, const Pos & pos, Value * * args, Value & v);
|
||||
friend void prim_match(EvalState & state, const Pos & pos, Value * * args, Value & v);
|
||||
|
|
|
@@ -1,4 +1,5 @@
#include "flake.hh"
#include "globals.hh"

#include <nlohmann/json.hpp>

@@ -22,12 +23,14 @@ static TrustedList readTrustedList()

static void writeTrustedList(const TrustedList & trustedList)
{
    writeFile(trustedListPath(), nlohmann::json(trustedList).dump());
    auto path = trustedListPath();
    createDirs(dirOf(path));
    writeFile(path, nlohmann::json(trustedList).dump());
}

void ConfigFile::apply()
{
    std::set<std::string> whitelist{"bash-prompt", "bash-prompt-suffix"};
    std::set<std::string> whitelist{"bash-prompt", "bash-prompt-suffix", "flake-registry"};

    for (auto & [name, value] : settings) {

@@ -50,21 +53,19 @@ void ConfigFile::apply()
            auto trustedList = readTrustedList();

            bool trusted = false;

            if (auto saved = get(get(trustedList, name).value_or(std::map<std::string, bool>()), valueS)) {
            if (nix::settings.acceptFlakeConfig){
                trusted = true;
            } else if (auto saved = get(get(trustedList, name).value_or(std::map<std::string, bool>()), valueS)) {
                trusted = *saved;
                warn("Using saved setting for '%s = %s' from ~/.local/share/nix/trusted-settings.json.", name, valueS);
            } else {
                // FIXME: filter ANSI escapes, newlines, \r, etc.
                if (std::tolower(logger->ask(fmt("do you want to allow configuration setting '%s' to be set to '" ANSI_RED "%s" ANSI_NORMAL "' (y/N)?", name, valueS)).value_or('n')) != 'y') {
                    if (std::tolower(logger->ask("do you want to permanently mark this value as untrusted (y/N)?").value_or('n')) == 'y') {
                        trustedList[name][valueS] = false;
                        writeTrustedList(trustedList);
                    }
                } else {
                    if (std::tolower(logger->ask("do you want to permanently mark this value as trusted (y/N)?").value_or('n')) == 'y') {
                        trustedList[name][valueS] = trusted = true;
                        writeTrustedList(trustedList);
                    }
                if (std::tolower(logger->ask(fmt("do you want to allow configuration setting '%s' to be set to '" ANSI_RED "%s" ANSI_NORMAL "' (y/N)?", name, valueS)).value_or('n')) == 'y') {
                    trusted = true;
                }
                if (std::tolower(logger->ask(fmt("do you want to permanently mark this value as %s (y/N)?", trusted ? "trusted": "untrusted" )).value_or('n')) == 'y') {
                    trustedList[name][valueS] = trusted;
                    writeTrustedList(trustedList);
                }
            }
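Decisions about flake-provided settings are persisted as a nested map, setting name to offered value to trusted flag, in the `~/.local/share/nix/trusted-settings.json` file named in the warning above. A small sketch of reading, updating and writing that shape, assuming the nlohmann::json library this file already includes (the file path and setting values below are illustrative):

```cpp
#include <fstream>
#include <iostream>
#include <map>
#include <string>

#include <nlohmann/json.hpp>

using TrustedList = std::map<std::string, std::map<std::string, bool>>;

TrustedList readTrustedList(const std::string & path)
{
    std::ifstream in(path);
    if (!in) return {};                       // missing file: nothing decided yet
    return nlohmann::json::parse(in).get<TrustedList>();
}

void writeTrustedList(const std::string & path, const TrustedList & trustedList)
{
    std::ofstream out(path);
    out << nlohmann::json(trustedList).dump(2) << "\n";
}

int main()
{
    const std::string path = "trusted-settings.json";    // stand-in for the real location

    auto trustedList = readTrustedList(path);
    trustedList["bash-prompt"]["(nix) "] = true;          // user chose "permanently trust"
    trustedList["flake-registry"]["https://example.test/registry.json"] = false;  // untrusted
    writeTrustedList(path, trustedList);

    for (auto & [name, values] : trustedList)
        for (auto & [value, trusted] : values)
            std::cout << name << " = " << value << " -> "
                      << (trusted ? "trusted" : "untrusted") << "\n";
}
```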
|
|
@ -1,4 +1,5 @@
|
|||
#include "flake.hh"
|
||||
#include "eval.hh"
|
||||
#include "lockfile.hh"
|
||||
#include "primops.hh"
|
||||
#include "eval-inline.hh"
|
||||
|
@ -63,8 +64,7 @@ static std::tuple<fetchers::Tree, FlakeRef, FlakeRef> fetchOrSubstituteTree(
|
|||
debug("got tree '%s' from '%s'",
|
||||
state.store->printStorePath(tree.storePath), lockedRef);
|
||||
|
||||
if (state.allowedPaths)
|
||||
state.allowedPaths->insert(tree.actualPath);
|
||||
state.allowPath(tree.storePath);
|
||||
|
||||
assert(!originalRef.input.getNarHash() || tree.storePath == originalRef.input.computeStorePath(*state.store));
|
||||
|
||||
|
@ -88,10 +88,12 @@ static void expectType(EvalState & state, ValueType type,
|
|||
}
|
||||
|
||||
static std::map<FlakeId, FlakeInput> parseFlakeInputs(
|
||||
EvalState & state, Value * value, const Pos & pos);
|
||||
EvalState & state, Value * value, const Pos & pos,
|
||||
const std::optional<Path> & baseDir);
|
||||
|
||||
static FlakeInput parseFlakeInput(EvalState & state,
|
||||
const std::string & inputName, Value * value, const Pos & pos)
|
||||
const std::string & inputName, Value * value, const Pos & pos,
|
||||
const std::optional<Path> & baseDir)
|
||||
{
|
||||
expectType(state, nAttrs, *value, pos);
|
||||
|
||||
|
@ -115,7 +117,7 @@ static FlakeInput parseFlakeInput(EvalState & state,
|
|||
expectType(state, nBool, *attr.value, *attr.pos);
|
||||
input.isFlake = attr.value->boolean;
|
||||
} else if (attr.name == sInputs) {
|
||||
input.overrides = parseFlakeInputs(state, attr.value, *attr.pos);
|
||||
input.overrides = parseFlakeInputs(state, attr.value, *attr.pos, baseDir);
|
||||
} else if (attr.name == sFollows) {
|
||||
expectType(state, nString, *attr.value, *attr.pos);
|
||||
input.follows = parseInputPath(attr.value->string.s);
|
||||
|
@ -153,7 +155,7 @@ static FlakeInput parseFlakeInput(EvalState & state,
|
|||
if (!attrs.empty())
|
||||
throw Error("unexpected flake input attribute '%s', at %s", attrs.begin()->first, pos);
|
||||
if (url)
|
||||
input.ref = parseFlakeRef(*url, {}, true);
|
||||
input.ref = parseFlakeRef(*url, baseDir, true);
|
||||
}
|
||||
|
||||
if (!input.follows && !input.ref)
|
||||
|
@ -163,7 +165,8 @@ static FlakeInput parseFlakeInput(EvalState & state,
|
|||
}
|
||||
|
||||
static std::map<FlakeId, FlakeInput> parseFlakeInputs(
|
||||
EvalState & state, Value * value, const Pos & pos)
|
||||
EvalState & state, Value * value, const Pos & pos,
|
||||
const std::optional<Path> & baseDir)
|
||||
{
|
||||
std::map<FlakeId, FlakeInput> inputs;
|
||||
|
||||
|
@ -174,7 +177,8 @@ static std::map<FlakeId, FlakeInput> parseFlakeInputs(
|
|||
parseFlakeInput(state,
|
||||
inputAttr.name,
|
||||
inputAttr.value,
|
||||
*inputAttr.pos));
|
||||
*inputAttr.pos,
|
||||
baseDir));
|
||||
}
|
||||
|
||||
return inputs;
|
||||
|
@ -190,7 +194,8 @@ static Flake getFlake(
|
|||
state, originalRef, allowLookup, flakeCache);
|
||||
|
||||
// Guard against symlink attacks.
|
||||
auto flakeFile = canonPath(sourceInfo.actualPath + "/" + lockedRef.subdir + "/flake.nix");
|
||||
auto flakeDir = canonPath(sourceInfo.actualPath + "/" + lockedRef.subdir);
|
||||
auto flakeFile = canonPath(flakeDir + "/flake.nix");
|
||||
if (!isInDir(flakeFile, sourceInfo.actualPath))
|
||||
throw Error("'flake.nix' file of flake '%s' escapes from '%s'",
|
||||
lockedRef, state.store->printStorePath(sourceInfo.storePath));
|
||||
|
@ -218,14 +223,14 @@ static Flake getFlake(
|
|||
auto sInputs = state.symbols.create("inputs");
|
||||
|
||||
if (auto inputs = vInfo.attrs->get(sInputs))
|
||||
flake.inputs = parseFlakeInputs(state, inputs->value, *inputs->pos);
|
||||
flake.inputs = parseFlakeInputs(state, inputs->value, *inputs->pos, flakeDir);
|
||||
|
||||
auto sOutputs = state.symbols.create("outputs");
|
||||
|
||||
if (auto outputs = vInfo.attrs->get(sOutputs)) {
|
||||
expectType(state, nFunction, *outputs->value, *outputs->pos);
|
||||
|
||||
if (outputs->value->isLambda() && outputs->value->lambda.fun->matchAttrs) {
|
||||
if (outputs->value->isLambda() && outputs->value->lambda.fun->hasFormals()) {
|
||||
for (auto & formal : outputs->value->lambda.fun->formals->formals) {
|
||||
if (formal.name != state.sSelf)
|
||||
flake.inputs.emplace(formal.name, FlakeInput {
|
||||
|
@ -292,11 +297,18 @@ LockedFlake lockFlake(
|
|||
const FlakeRef & topRef,
|
||||
const LockFlags & lockFlags)
|
||||
{
|
||||
settings.requireExperimentalFeature("flakes");
|
||||
settings.requireExperimentalFeature(Xp::Flakes);
|
||||
|
||||
FlakeCache flakeCache;
|
||||
|
||||
auto flake = getFlake(state, topRef, lockFlags.useRegistries, flakeCache);
|
||||
auto useRegistries = lockFlags.useRegistries.value_or(settings.useRegistries);
|
||||
|
||||
auto flake = getFlake(state, topRef, useRegistries, flakeCache);
|
||||
|
||||
if (lockFlags.applyNixConfig) {
|
||||
flake.config.apply();
|
||||
state.store->setOptions();
|
||||
}
|
||||
|
||||
try {
|
||||
|
||||
|
@ -317,25 +329,38 @@ LockedFlake lockFlake(
|
|||
|
||||
std::vector<FlakeRef> parents;
|
||||
|
||||
struct LockParent {
|
||||
/* The path to this parent. */
|
||||
InputPath path;
|
||||
|
||||
/* Whether we are currently inside a top-level lockfile
|
||||
(inputs absolute) or subordinate lockfile (inputs
|
||||
relative). */
|
||||
bool absolute;
|
||||
};
|
||||
|
||||
std::function<void(
|
||||
const FlakeInputs & flakeInputs,
|
||||
std::shared_ptr<Node> node,
|
||||
const InputPath & inputPathPrefix,
|
||||
std::shared_ptr<const Node> oldNode)>
|
||||
std::shared_ptr<const Node> oldNode,
|
||||
const LockParent & parent,
|
||||
const Path & parentPath)>
|
||||
computeLocks;
|
||||
|
||||
computeLocks = [&](
|
||||
const FlakeInputs & flakeInputs,
|
||||
std::shared_ptr<Node> node,
|
||||
const InputPath & inputPathPrefix,
|
||||
std::shared_ptr<const Node> oldNode)
|
||||
std::shared_ptr<const Node> oldNode,
|
||||
const LockParent & parent,
|
||||
const Path & parentPath)
|
||||
{
|
||||
debug("computing lock file node '%s'", printInputPath(inputPathPrefix));
|
||||
|
||||
/* Get the overrides (i.e. attributes of the form
|
||||
'inputs.nixops.inputs.nixpkgs.url = ...'). */
|
||||
// FIXME: check this
|
||||
for (auto & [id, input] : flake.inputs) {
|
||||
for (auto & [id, input] : flakeInputs) {
|
||||
for (auto & [idOverride, inputOverride] : input.overrides) {
|
||||
auto inputPath(inputPathPrefix);
|
||||
inputPath.push_back(id);
|
||||
|
@ -359,22 +384,31 @@ LockedFlake lockFlake(
|
|||
ancestors? */
|
||||
auto i = overrides.find(inputPath);
|
||||
bool hasOverride = i != overrides.end();
|
||||
if (hasOverride) overridesUsed.insert(inputPath);
|
||||
if (hasOverride) {
|
||||
overridesUsed.insert(inputPath);
|
||||
// Respect the “flakeness” of the input even if we
|
||||
// override it
|
||||
i->second.isFlake = input2.isFlake;
|
||||
}
|
||||
auto & input = hasOverride ? i->second : input2;
|
||||
|
||||
/* Resolve 'follows' later (since it may refer to an input
|
||||
path we haven't processed yet. */
|
||||
if (input.follows) {
|
||||
InputPath target;
|
||||
if (hasOverride || input.absolute)
|
||||
/* 'follows' from an override is relative to the
|
||||
root of the graph. */
|
||||
|
||||
if (parent.absolute && !hasOverride) {
|
||||
target = *input.follows;
|
||||
else {
|
||||
/* Otherwise, it's relative to the current flake. */
|
||||
target = inputPathPrefix;
|
||||
} else {
|
||||
if (hasOverride) {
|
||||
target = inputPathPrefix;
|
||||
target.pop_back();
|
||||
} else
|
||||
target = parent.path;
|
||||
|
||||
for (auto & i : *input.follows) target.push_back(i);
|
||||
}
|
||||
|
||||
debug("input '%s' follows '%s'", inputPathS, printInputPath(target));
|
||||
node->inputs.insert_or_assign(id, target);
|
||||
continue;
|
||||
|
@ -412,22 +446,18 @@ LockedFlake lockFlake(
|
|||
update it. */
|
||||
auto lb = lockFlags.inputUpdates.lower_bound(inputPath);
|
||||
|
||||
auto hasChildUpdate =
|
||||
auto mustRefetch =
|
||||
lb != lockFlags.inputUpdates.end()
|
||||
&& lb->size() > inputPath.size()
|
||||
&& std::equal(inputPath.begin(), inputPath.end(), lb->begin());
|
||||
|
||||
if (hasChildUpdate) {
|
||||
auto inputFlake = getFlake(
|
||||
state, oldLock->lockedRef, false, flakeCache);
|
||||
computeLocks(inputFlake.inputs, childNode, inputPath, oldLock);
|
||||
} else {
|
||||
FlakeInputs fakeInputs;
|
||||
|
||||
if (!mustRefetch) {
|
||||
/* No need to fetch this flake, we can be
|
||||
lazy. However there may be new overrides on the
|
||||
inputs of this flake, so we need to check
|
||||
those. */
|
||||
FlakeInputs fakeInputs;
|
||||
|
||||
for (auto & i : oldLock->inputs) {
|
||||
if (auto lockedNode = std::get_if<0>(&i.second)) {
|
||||
fakeInputs.emplace(i.first, FlakeInput {
|
||||
|
@ -435,16 +465,28 @@ LockedFlake lockFlake(
|
|||
.isFlake = (*lockedNode)->isFlake,
|
||||
});
|
||||
} else if (auto follows = std::get_if<1>(&i.second)) {
|
||||
auto o = input.overrides.find(i.first);
|
||||
// If the override disappeared, we have to refetch the flake,
|
||||
// since some of the inputs may not be present in the lockfile.
|
||||
if (o == input.overrides.end()) {
|
||||
mustRefetch = true;
|
||||
// There's no point populating the rest of the fake inputs,
|
||||
// since we'll refetch the flake anyways.
|
||||
break;
|
||||
}
|
||||
fakeInputs.emplace(i.first, FlakeInput {
|
||||
.follows = *follows,
|
||||
.absolute = true
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
computeLocks(fakeInputs, childNode, inputPath, oldLock);
|
||||
}
|
||||
|
||||
computeLocks(
|
||||
mustRefetch
|
||||
? getFlake(state, oldLock->lockedRef, false, flakeCache).inputs
|
||||
: fakeInputs,
|
||||
childNode, inputPath, oldLock, parent, parentPath);
|
||||
|
||||
} else {
|
||||
/* We need to create a new lock file entry. So fetch
|
||||
this input. */
|
||||
|
@ -454,7 +496,15 @@ LockedFlake lockFlake(
|
|||
throw Error("cannot update flake input '%s' in pure mode", inputPathS);
|
||||
|
||||
if (input.isFlake) {
|
||||
auto inputFlake = getFlake(state, *input.ref, lockFlags.useRegistries, flakeCache);
|
||||
Path localPath = parentPath;
|
||||
FlakeRef localRef = *input.ref;
|
||||
|
||||
// If this input is a path, recurse it down.
|
||||
// This allows us to resolve path inputs relative to the current flake.
|
||||
if (localRef.input.getType() == "path")
|
||||
localPath = absPath(*input.ref->input.getSourcePath(), parentPath);
|
||||
|
||||
auto inputFlake = getFlake(state, localRef, useRegistries, flakeCache);
|
||||
|
||||
/* Note: in case of an --override-input, we use
|
||||
the *original* ref (input2.ref) for the
|
||||
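For `path:`-type inputs the locking step now resolves the input against the directory of the flake that declares it (`parentPath`) rather than against the working directory. A hedged sketch of just that resolution rule using std::filesystem (Nix's `absPath` also canonicalises; this shows only the joining logic, with illustrative paths):

```cpp
#include <filesystem>
#include <iostream>
#include <string>

namespace fs = std::filesystem;

// Resolve a path-type input the way the lock code does: relative inputs are
// interpreted against the declaring flake's directory, absolute ones are kept.
std::string resolvePathInput(const std::string & sourcePath, const std::string & parentPath)
{
    fs::path p(sourcePath);
    if (p.is_absolute())
        return p.lexically_normal().string();
    return (fs::path(parentPath) / p).lexically_normal().string();
}

int main()
{
    // A sub-flake referring to a sibling directory two levels up from itself.
    std::cout << resolvePathInput("../../common", "/src/myproject/flakes/dev") << "\n";
    // -> /src/myproject/common
    std::cout << resolvePathInput("/nix/store/abc-vendored", "/src/myproject") << "\n";
    // -> /nix/store/abc-vendored
}
```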
|
@ -475,6 +525,13 @@ LockedFlake lockFlake(
|
|||
parents.push_back(*input.ref);
|
||||
Finally cleanup([&]() { parents.pop_back(); });
|
||||
|
||||
// Follows paths from existing inputs in the top-level lockfile are absolute,
|
||||
// whereas paths in subordinate lockfiles are relative to those lockfiles.
|
||||
LockParent newParent {
|
||||
.path = inputPath,
|
||||
.absolute = oldLock ? true : false
|
||||
};
|
||||
|
||||
/* Recursively process the inputs of this
|
||||
flake. Also, unless we already have this flake
|
||||
in the top-level lock file, use this flake's
|
||||
|
@ -484,12 +541,13 @@ LockedFlake lockFlake(
|
|||
oldLock
|
||||
? std::dynamic_pointer_cast<const Node>(oldLock)
|
||||
: LockFile::read(
|
||||
inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root);
|
||||
inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root,
|
||||
newParent, localPath);
|
||||
}
|
||||
|
||||
else {
|
||||
auto [sourceInfo, resolvedRef, lockedRef] = fetchOrSubstituteTree(
|
||||
state, *input.ref, lockFlags.useRegistries, flakeCache);
|
||||
state, *input.ref, useRegistries, flakeCache);
|
||||
node->inputs.insert_or_assign(id,
|
||||
std::make_shared<LockedNode>(lockedRef, *input.ref, false));
|
||||
}
|
||||
|
@ -502,9 +560,17 @@ LockedFlake lockFlake(
|
|||
}
|
||||
};
|
||||
|
||||
LockParent parent {
|
||||
.path = {},
|
||||
.absolute = true
|
||||
};
|
||||
|
||||
// Bring in the current ref for relative path resolution if we have it
|
||||
auto parentPath = canonPath(flake.sourceInfo->actualPath + "/" + flake.lockedRef.subdir);
|
||||
|
||||
computeLocks(
|
||||
flake.inputs, newLockFile.root, {},
|
||||
lockFlags.recreateLockFile ? nullptr : oldLockFile.root);
|
||||
lockFlags.recreateLockFile ? nullptr : oldLockFile.root, parent, parentPath);
|
||||
|
||||
for (auto & i : lockFlags.inputOverrides)
|
||||
if (!overridesUsed.count(i.first))
|
||||
|
@ -554,8 +620,8 @@ LockedFlake lockFlake(
|
|||
topRef.input.markChangedFile(
|
||||
(topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock",
|
||||
lockFlags.commitLockFile
|
||||
? std::optional<std::string>(fmt("%s: %s\n\nFlake input changes:\n\n%s",
|
||||
relPath, lockFileExists ? "Update" : "Add", diff))
|
||||
? std::optional<std::string>(fmt("%s: %s\n\nFlake lock file changes:\n\n%s",
|
||||
relPath, lockFileExists ? "Update" : "Add", filterANSIEscapes(diff, true)))
|
||||
: std::nullopt);
|
||||
|
||||
/* Rewriting the lockfile changed the top-level
|
||||
|
@ -563,7 +629,7 @@ LockedFlake lockFlake(
|
|||
also just clear the 'rev' field... */
|
||||
auto prevLockedRef = flake.lockedRef;
|
||||
FlakeCache dummyCache;
|
||||
flake = getFlake(state, topRef, lockFlags.useRegistries, dummyCache);
|
||||
flake = getFlake(state, topRef, useRegistries, dummyCache);
|
||||
|
||||
if (lockFlags.commitLockFile &&
|
||||
flake.lockedRef.input.getRev() &&
|
||||
|
@ -580,8 +646,10 @@ LockedFlake lockFlake(
|
|||
}
|
||||
} else
|
||||
throw Error("cannot write modified lock file of flake '%s' (use '--no-write-lock-file' to ignore)", topRef);
|
||||
} else
|
||||
} else {
|
||||
warn("not writing modified lock file of flake '%s':\n%s", topRef, chomp(diff));
|
||||
flake.forceDirty = true;
|
||||
}
|
||||
}
|
||||
|
||||
return LockedFlake { .flake = std::move(flake), .lockFile = std::move(newLockFile) };
|
||||
|
@ -604,26 +672,32 @@ void callFlake(EvalState & state,
|
|||
|
||||
mkString(*vLocks, lockedFlake.lockFile.to_string());
|
||||
|
||||
emitTreeAttrs(state, *lockedFlake.flake.sourceInfo, lockedFlake.flake.lockedRef.input, *vRootSrc);
|
||||
emitTreeAttrs(
|
||||
state,
|
||||
*lockedFlake.flake.sourceInfo,
|
||||
lockedFlake.flake.lockedRef.input,
|
||||
*vRootSrc,
|
||||
false,
|
||||
lockedFlake.flake.forceDirty);
|
||||
|
||||
mkString(*vRootSubdir, lockedFlake.flake.lockedRef.subdir);
|
||||
|
||||
static RootValue vCallFlake = nullptr;
|
||||
|
||||
if (!vCallFlake) {
|
||||
vCallFlake = allocRootValue(state.allocValue());
|
||||
if (!state.vCallFlake) {
|
||||
state.vCallFlake = allocRootValue(state.allocValue());
|
||||
state.eval(state.parseExprFromString(
|
||||
#include "call-flake.nix.gen.hh"
|
||||
, "/"), **vCallFlake);
|
||||
, "/"), **state.vCallFlake);
|
||||
}
|
||||
|
||||
state.callFunction(**vCallFlake, *vLocks, *vTmp1, noPos);
|
||||
state.callFunction(**state.vCallFlake, *vLocks, *vTmp1, noPos);
|
||||
state.callFunction(*vTmp1, *vRootSrc, *vTmp2, noPos);
|
||||
state.callFunction(*vTmp2, *vRootSubdir, vRes, noPos);
|
||||
}
|
||||
|
||||
static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||
{
|
||||
state.requireExperimentalFeatureOnEvaluation(Xp::Flakes, "builtins.getFlake", pos);
|
||||
|
||||
auto flakeRefS = state.forceStringNoCtx(*args[0], pos);
|
||||
auto flakeRef = parseFlakeRef(flakeRefS, {}, true);
|
||||
if (evalSettings.pureEval && !flakeRef.input.isImmutable())
|
||||
|
@ -633,13 +707,13 @@ static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Va
|
|||
lockFlake(state, flakeRef,
|
||||
LockFlags {
|
||||
.updateLockFile = false,
|
||||
.useRegistries = !evalSettings.pureEval,
|
||||
.useRegistries = !evalSettings.pureEval && settings.useRegistries,
|
||||
.allowMutable = !evalSettings.pureEval,
|
||||
}),
|
||||
v);
|
||||
}
|
||||
|
||||
static RegisterPrimOp r2("__getFlake", 1, prim_getFlake, "flakes");
|
||||
static RegisterPrimOp r2("__getFlake", 1, prim_getFlake);
|
||||
|
||||
}
|
||||
|
||||
|
@@ -649,8 +723,9 @@ Fingerprint LockedFlake::getFingerprint() const
    // and we haven't changed it, then it's sufficient to use
    // flake.sourceInfo.storePath for the fingerprint.
    return hashString(htSHA256,
        fmt("%s;%d;%d;%s",
        fmt("%s;%s;%d;%d;%s",
            flake.sourceInfo->storePath.to_string(),
            flake.lockedRef.subdir,
            flake.lockedRef.input.getRevCount().value_or(0),
            flake.lockedRef.input.getLastModified().value_or(0),
            lockFile));
|
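The fingerprint now also covers `lockedRef.subdir`, so two flakes served from the same store path but different subdirectories no longer collide. A sketch of assembling the semicolon-separated fingerprint input with the added field (the real code hashes it with SHA-256 via `hashString(htSHA256, ...)`; `std::hash` below is only a stand-in, and the field values are made up):

```cpp
#include <cstdint>
#include <functional>
#include <iostream>
#include <sstream>
#include <string>

// Build the fingerprint input, including the subdir field added by this
// change, then hash it (std::hash stands in for SHA-256 here).
std::size_t fingerprint(const std::string & storePath,
                        const std::string & subdir,
                        uint64_t revCount,
                        uint64_t lastModified,
                        const std::string & lockFile)
{
    std::ostringstream s;
    s << storePath << ";" << subdir << ";" << revCount << ";"
      << lastModified << ";" << lockFile;
    return std::hash<std::string>{}(s.str());
}

int main()
{
    // Same store path, different subdir: distinct fingerprints after the change.
    auto a = fingerprint("abc123-source", "",       7, 1630000000, "{}");
    auto b = fingerprint("abc123-source", "nested", 7, 1630000000, "{}");
    std::cout << std::hex << a << "\n" << b << "\n";
}
```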
|
|
@ -43,7 +43,6 @@ struct FlakeInput
|
|||
std::optional<FlakeRef> ref;
|
||||
bool isFlake = true; // true = process flake to get outputs, false = (fetched) static source path
|
||||
std::optional<InputPath> follows;
|
||||
bool absolute = false; // whether 'follows' is relative to the flake root
|
||||
FlakeInputs overrides;
|
||||
};
|
||||
|
||||
|
@ -59,9 +58,10 @@ struct ConfigFile
|
|||
/* The contents of a flake.nix file. */
|
||||
struct Flake
|
||||
{
|
||||
FlakeRef originalRef; // the original flake specification (by the user)
|
||||
FlakeRef resolvedRef; // registry references and caching resolved to the specific underlying flake
|
||||
FlakeRef lockedRef; // the specific local store result of invoking the fetcher
|
||||
FlakeRef originalRef; // the original flake specification (by the user)
|
||||
FlakeRef resolvedRef; // registry references and caching resolved to the specific underlying flake
|
||||
FlakeRef lockedRef; // the specific local store result of invoking the fetcher
|
||||
bool forceDirty = false; // pretend that 'lockedRef' is dirty
|
||||
std::optional<std::string> description;
|
||||
std::shared_ptr<const fetchers::Tree> sourceInfo;
|
||||
FlakeInputs inputs;
|
||||
|
@ -102,7 +102,11 @@ struct LockFlags
|
|||
|
||||
/* Whether to use the registries to lookup indirect flake
|
||||
references like 'nixpkgs'. */
|
||||
bool useRegistries = true;
|
||||
std::optional<bool> useRegistries = std::nullopt;
|
||||
|
||||
/* Whether to apply flake's nixConfig attribute to the configuration */
|
||||
|
||||
bool applyNixConfig = false;
|
||||
|
||||
/* Whether mutable flake references (i.e. those without a Git
|
||||
revision or similar) without a corresponding lock are
|
||||
|
@ -137,6 +141,8 @@ void emitTreeAttrs(
|
|||
EvalState & state,
|
||||
const fetchers::Tree & tree,
|
||||
const fetchers::Input & input,
|
||||
Value & v, bool emptyRevFallback = false);
|
||||
Value & v,
|
||||
bool emptyRevFallback = false,
|
||||
bool forceDirty = false);
|
||||
|
||||
}
|
||||
|
|
|
@@ -172,8 +172,12 @@ std::pair<FlakeRef, std::string> parseFlakeRefWithFragment(
        auto parsedURL = parseURL(url);
        std::string fragment;
        std::swap(fragment, parsedURL.fragment);

        auto input = Input::fromURL(parsedURL);
        input.parent = baseDir;

        return std::make_pair(
            FlakeRef(Input::fromURL(parsedURL), get(parsedURL.query, "dir").value_or("")),
            FlakeRef(std::move(input), get(parsedURL.query, "dir").value_or("")),
            fragment);
    }
}
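Recording `input.parent = baseDir` lets a relative flake reference remember which flake (or directory) it was written in, so later resolution steps can interpret it consistently. A tiny sketch of carrying that parent directory alongside a parsed reference (the types and field names here are illustrative stand-ins, not Nix's `Input`/`FlakeRef`):

```cpp
#include <iostream>
#include <optional>
#include <string>

// Illustrative stand-in for a parsed flake input.
struct InputSketch {
    std::string url;
    std::optional<std::string> parent;   // directory of the referring flake, if any
};

InputSketch parseFlakeRefSketch(const std::string & url,
                                const std::optional<std::string> & baseDir)
{
    InputSketch input{url, std::nullopt};
    input.parent = baseDir;              // same bookkeeping as `input.parent = baseDir;`
    return input;
}

int main()
{
    auto rel = parseFlakeRefSketch("path:./subflake", std::string("/src/myproject"));
    auto abs = parseFlakeRefSketch("github:NixOS/nixpkgs", std::nullopt);

    std::cout << rel.url << " (parent: " << rel.parent.value_or("<none>") << ")\n";
    std::cout << abs.url << " (parent: " << abs.parent.value_or("<none>") << ")\n";
}
```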