forked from lix-project/lix

commit bd96403da6
Merge remote-tracking branch 'upstream/master' into trustless-remote-builder-simple
.github/workflows/test.yml
vendored
2
.github/workflows/test.yml
vendored
|
@ -12,6 +12,6 @@ jobs:
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v2
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
- uses: cachix/install-nix-action@v11
|
- uses: cachix/install-nix-action@v12
|
||||||
#- run: nix flake check
|
#- run: nix flake check
|
||||||
- run: nix-build -A checks.$(if [[ `uname` = Linux ]]; then echo x86_64-linux; else echo x86_64-darwin; fi)
|
- run: nix-build -A checks.$(if [[ `uname` = Linux ]]; then echo x86_64-linux; else echo x86_64-darwin; fi)
|
||||||
|
|
.gitignore (vendored) | 4
@@ -18,13 +18,13 @@ perl/Makefile.config
 /doc/manual/nix.json
 /doc/manual/conf-file.json
 /doc/manual/builtins.json
-/doc/manual/src/command-ref/nix.md
+/doc/manual/src/SUMMARY.md
+/doc/manual/src/command-ref/new-cli
 /doc/manual/src/command-ref/conf-file.md
 /doc/manual/src/expressions/builtins.md

 # /scripts/
 /scripts/nix-profile.sh
-/scripts/nix-copy-closure
 /scripts/nix-reduce-build
 /scripts/nix-http-export.cgi
 /scripts/nix-profile-daemon.sh
config/config.guess (vendored) | 500
@@ -1,8 +1,8 @@
 #! /bin/sh
 # Attempt to guess a canonical system name.
-# Copyright 1992-2018 Free Software Foundation, Inc.
+# Copyright 1992-2020 Free Software Foundation, Inc.

-timestamp='2018-08-02'
+timestamp='2020-11-19'

 # This file is free software; you can redistribute it and/or modify it
 # under the terms of the GNU General Public License as published by
@@ -27,12 +27,12 @@ timestamp='2018-08-02'
 # Originally written by Per Bothner; maintained since 2000 by Ben Elliston.
 #
 # You can get the latest version of this script from:
-# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess
+# https://git.savannah.gnu.org/cgit/config.git/plain/config.guess
 #
 # Please send patches to <config-patches@gnu.org>.


-me=`echo "$0" | sed -e 's,.*/,,'`
+me=$(echo "$0" | sed -e 's,.*/,,')

 usage="\
 Usage: $0 [OPTION]
@@ -50,7 +50,7 @@ version="\
 GNU config.guess ($timestamp)

 Originally written by Per Bothner.
-Copyright 1992-2018 Free Software Foundation, Inc.
+Copyright 1992-2020 Free Software Foundation, Inc.

 This is free software; see the source for copying conditions. There is NO
 warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -96,13 +96,14 @@ fi

 tmp=
 # shellcheck disable=SC2172
-trap 'test -z "$tmp" || rm -fr "$tmp"' 1 2 13 15
-trap 'exitcode=$?; test -z "$tmp" || rm -fr "$tmp"; exit $exitcode' 0
+trap 'test -z "$tmp" || rm -fr "$tmp"' 0 1 2 13 15

 set_cc_for_build() {
+# prevent multiple calls if $tmp is already set
+test "$tmp" && return 0
 : "${TMPDIR=/tmp}"
 # shellcheck disable=SC2039
-{ tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
+{ tmp=$( (umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null) && test -n "$tmp" && test -d "$tmp" ; } ||
 { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir "$tmp" 2>/dev/null) ; } ||
 { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir "$tmp" 2>/dev/null) && echo "Warning: creating insecure temp directory" >&2 ; } ||
 { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; }
@@ -130,16 +131,14 @@ if test -f /.attbin/uname ; then
 PATH=$PATH:/.attbin ; export PATH
 fi

-UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
-UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
-UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
-UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+UNAME_MACHINE=$( (uname -m) 2>/dev/null) || UNAME_MACHINE=unknown
+UNAME_RELEASE=$( (uname -r) 2>/dev/null) || UNAME_RELEASE=unknown
+UNAME_SYSTEM=$( (uname -s) 2>/dev/null) || UNAME_SYSTEM=unknown
+UNAME_VERSION=$( (uname -v) 2>/dev/null) || UNAME_VERSION=unknown

 case "$UNAME_SYSTEM" in
 Linux|GNU|GNU/*)
-# If the system lacks a compiler, then just pick glibc.
-# We could probably try harder.
-LIBC=gnu
+LIBC=unknown

 set_cc_for_build
 cat <<-EOF > "$dummy.c"
@@ -148,18 +147,30 @@ Linux|GNU|GNU/*)
 LIBC=uclibc
 #elif defined(__dietlibc__)
 LIBC=dietlibc
-#else
+#elif defined(__GLIBC__)
 LIBC=gnu
+#else
+#include <stdarg.h>
+/* First heuristic to detect musl libc. */
+#ifdef __DEFINED_va_list
+LIBC=musl
+#endif
 #endif
 EOF
-eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`"
+eval "$($CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g')"

-# If ldd exists, use it to detect musl libc.
-if command -v ldd >/dev/null && \
-ldd --version 2>&1 | grep -q ^musl
-then
+# Second heuristic to detect musl libc.
+if [ "$LIBC" = unknown ] &&
+command -v ldd >/dev/null &&
+ldd --version 2>&1 | grep -q ^musl; then
 LIBC=musl
 fi

+# If the system lacks a compiler, then just pick glibc.
+# We could probably try harder.
+if [ "$LIBC" = unknown ]; then
+LIBC=gnu
+fi
 ;;
 esac

@@ -178,19 +189,20 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
 # Note: NetBSD doesn't particularly care about the vendor
 # portion of the name. We always set it to "unknown".
 sysctl="sysctl -n hw.machine_arch"
-UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \
+UNAME_MACHINE_ARCH=$( (uname -p 2>/dev/null || \
 "/sbin/$sysctl" 2>/dev/null || \
 "/usr/sbin/$sysctl" 2>/dev/null || \
-echo unknown)`
+echo unknown))
 case "$UNAME_MACHINE_ARCH" in
+aarch64eb) machine=aarch64_be-unknown ;;
 armeb) machine=armeb-unknown ;;
 arm*) machine=arm-unknown ;;
 sh3el) machine=shl-unknown ;;
 sh3eb) machine=sh-unknown ;;
 sh5el) machine=sh5le-unknown ;;
 earmv*)
-arch=`echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,'`
-endian=`echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p'`
+arch=$(echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,')
+endian=$(echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p')
 machine="${arch}${endian}"-unknown
 ;;
 *) machine="$UNAME_MACHINE_ARCH"-unknown ;;
@@ -221,7 +233,7 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
 case "$UNAME_MACHINE_ARCH" in
 earm*)
 expr='s/^earmv[0-9]/-eabi/;s/eb$//'
-abi=`echo "$UNAME_MACHINE_ARCH" | sed -e "$expr"`
+abi=$(echo "$UNAME_MACHINE_ARCH" | sed -e "$expr")
 ;;
 esac
 # The OS release
@@ -234,7 +246,7 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
 release='-gnu'
 ;;
 *)
-release=`echo "$UNAME_RELEASE" | sed -e 's/[-_].*//' | cut -d. -f1,2`
+release=$(echo "$UNAME_RELEASE" | sed -e 's/[-_].*//' | cut -d. -f1,2)
 ;;
 esac
 # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
@@ -243,15 +255,15 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
 echo "$machine-${os}${release}${abi-}"
 exit ;;
 *:Bitrig:*:*)
-UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'`
+UNAME_MACHINE_ARCH=$(arch | sed 's/Bitrig.//')
 echo "$UNAME_MACHINE_ARCH"-unknown-bitrig"$UNAME_RELEASE"
 exit ;;
 *:OpenBSD:*:*)
-UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
+UNAME_MACHINE_ARCH=$(arch | sed 's/OpenBSD.//')
 echo "$UNAME_MACHINE_ARCH"-unknown-openbsd"$UNAME_RELEASE"
 exit ;;
 *:LibertyBSD:*:*)
-UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'`
+UNAME_MACHINE_ARCH=$(arch | sed 's/^.*BSD\.//')
 echo "$UNAME_MACHINE_ARCH"-unknown-libertybsd"$UNAME_RELEASE"
 exit ;;
 *:MidnightBSD:*:*)
@@ -263,6 +275,9 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
 *:SolidBSD:*:*)
 echo "$UNAME_MACHINE"-unknown-solidbsd"$UNAME_RELEASE"
 exit ;;
+*:OS108:*:*)
+echo "$UNAME_MACHINE"-unknown-os108_"$UNAME_RELEASE"
+exit ;;
 macppc:MirBSD:*:*)
 echo powerpc-unknown-mirbsd"$UNAME_RELEASE"
 exit ;;
@@ -272,6 +287,9 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
 *:Sortix:*:*)
 echo "$UNAME_MACHINE"-unknown-sortix
 exit ;;
+*:Twizzler:*:*)
+echo "$UNAME_MACHINE"-unknown-twizzler
+exit ;;
 *:Redox:*:*)
 echo "$UNAME_MACHINE"-unknown-redox
 exit ;;
@@ -281,17 +299,17 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
 alpha:OSF1:*:*)
 case $UNAME_RELEASE in
 *4.0)
-UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
+UNAME_RELEASE=$(/usr/sbin/sizer -v | awk '{print $3}')
 ;;
 *5.*)
-UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
+UNAME_RELEASE=$(/usr/sbin/sizer -v | awk '{print $4}')
 ;;
 esac
 # According to Compaq, /usr/sbin/psrinfo has been available on
 # OSF/1 and Tru64 systems produced since 1995. I hope that
 # covers most systems running today. This code pipes the CPU
 # types through head -n 1, so we only detect the type of CPU 0.
-ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1`
+ALPHA_CPU_TYPE=$(/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1)
 case "$ALPHA_CPU_TYPE" in
 "EV4 (21064)")
 UNAME_MACHINE=alpha ;;
@@ -329,7 +347,7 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
 # A Tn.n version is a released field test version.
 # A Xn.n version is an unreleased experimental baselevel.
 # 1.2 uses "1.2" for uname -r.
-echo "$UNAME_MACHINE"-dec-osf"`echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`"
+echo "$UNAME_MACHINE"-dec-osf"$(echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz)"
 # Reset EXIT trap before exiting to avoid spurious non-zero exit code.
 exitcode=$?
 trap '' 0
@@ -363,7 +381,7 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
 exit ;;
 Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
 # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
-if test "`(/bin/universe) 2>/dev/null`" = att ; then
+if test "$( (/bin/universe) 2>/dev/null)" = att ; then
 echo pyramid-pyramid-sysv3
 else
 echo pyramid-pyramid-bsd
@@ -376,54 +394,59 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
 echo sparc-icl-nx6
 exit ;;
 DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
-case `/usr/bin/uname -p` in
+case $(/usr/bin/uname -p) in
 sparc) echo sparc-icl-nx7; exit ;;
 esac ;;
 s390x:SunOS:*:*)
-echo "$UNAME_MACHINE"-ibm-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`"
+echo "$UNAME_MACHINE"-ibm-solaris2"$(echo "$UNAME_RELEASE" | sed -e 's/[^.]*//')"
 exit ;;
 sun4H:SunOS:5.*:*)
-echo sparc-hal-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`"
+echo sparc-hal-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')"
 exit ;;
 sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
-echo sparc-sun-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`"
+echo sparc-sun-solaris2"$(echo "$UNAME_RELEASE" | sed -e 's/[^.]*//')"
 exit ;;
 i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
 echo i386-pc-auroraux"$UNAME_RELEASE"
 exit ;;
 i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
-UNAME_REL="`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`"
-case `isainfo -b` in
-32)
-echo i386-pc-solaris2"$UNAME_REL"
-;;
-64)
-echo x86_64-pc-solaris2"$UNAME_REL"
-;;
-esac
+set_cc_for_build
+SUN_ARCH=i386
+# If there is a compiler, see if it is configured for 64-bit objects.
+# Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
+# This test works for both compilers.
+if test "$CC_FOR_BUILD" != no_compiler_found; then
+if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
+(CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
+grep IS_64BIT_ARCH >/dev/null
+then
+SUN_ARCH=x86_64
+fi
+fi
+echo "$SUN_ARCH"-pc-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')"
 exit ;;
 sun4*:SunOS:6*:*)
 # According to config.sub, this is the proper way to canonicalize
 # SunOS6. Hard to guess exactly what SunOS6 will be like, but
 # it's likely to be more like Solaris than SunOS4.
-echo sparc-sun-solaris3"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`"
+echo sparc-sun-solaris3"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')"
 exit ;;
 sun4*:SunOS:*:*)
-case "`/usr/bin/arch -k`" in
+case "$(/usr/bin/arch -k)" in
 Series*|S4*)
-UNAME_RELEASE=`uname -v`
+UNAME_RELEASE=$(uname -v)
 ;;
 esac
 # Japanese Language versions have a version number like `4.1.3-JL'.
-echo sparc-sun-sunos"`echo "$UNAME_RELEASE"|sed -e 's/-/_/'`"
+echo sparc-sun-sunos"$(echo "$UNAME_RELEASE"|sed -e 's/-/_/')"
 exit ;;
 sun3*:SunOS:*:*)
 echo m68k-sun-sunos"$UNAME_RELEASE"
 exit ;;
 sun*:*:4.2BSD:*)
-UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
+UNAME_RELEASE=$( (sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null)
 test "x$UNAME_RELEASE" = x && UNAME_RELEASE=3
-case "`/bin/arch`" in
+case "$(/bin/arch)" in
 sun3)
 echo m68k-sun-sunos"$UNAME_RELEASE"
 ;;
@@ -503,8 +526,8 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
 }
 EOF
 $CC_FOR_BUILD -o "$dummy" "$dummy.c" &&
-dummyarg=`echo "$UNAME_RELEASE" | sed -n 's/\([0-9]*\).*/\1/p'` &&
-SYSTEM_NAME=`"$dummy" "$dummyarg"` &&
+dummyarg=$(echo "$UNAME_RELEASE" | sed -n 's/\([0-9]*\).*/\1/p') &&
+SYSTEM_NAME=$("$dummy" "$dummyarg") &&
 { echo "$SYSTEM_NAME"; exit; }
 echo mips-mips-riscos"$UNAME_RELEASE"
 exit ;;
@@ -531,11 +554,11 @@ EOF
 exit ;;
 AViiON:dgux:*:*)
 # DG/UX returns AViiON for all architectures
-UNAME_PROCESSOR=`/usr/bin/uname -p`
-if [ "$UNAME_PROCESSOR" = mc88100 ] || [ "$UNAME_PROCESSOR" = mc88110 ]
+UNAME_PROCESSOR=$(/usr/bin/uname -p)
+if test "$UNAME_PROCESSOR" = mc88100 || test "$UNAME_PROCESSOR" = mc88110
 then
-if [ "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx ] || \
-[ "$TARGET_BINARY_INTERFACE"x = x ]
+if test "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx || \
+test "$TARGET_BINARY_INTERFACE"x = x
 then
 echo m88k-dg-dgux"$UNAME_RELEASE"
 else
@@ -559,17 +582,17 @@ EOF
 echo m68k-tektronix-bsd
 exit ;;
 *:IRIX*:*:*)
-echo mips-sgi-irix"`echo "$UNAME_RELEASE"|sed -e 's/-/_/g'`"
+echo mips-sgi-irix"$(echo "$UNAME_RELEASE"|sed -e 's/-/_/g')"
 exit ;;
 ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
 echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
-exit ;; # Note that: echo "'`uname -s`'" gives 'AIX '
+exit ;; # Note that: echo "'$(uname -s)'" gives 'AIX '
 i*86:AIX:*:*)
 echo i386-ibm-aix
 exit ;;
 ia64:AIX:*:*)
-if [ -x /usr/bin/oslevel ] ; then
-IBM_REV=`/usr/bin/oslevel`
+if test -x /usr/bin/oslevel ; then
+IBM_REV=$(/usr/bin/oslevel)
 else
 IBM_REV="$UNAME_VERSION.$UNAME_RELEASE"
 fi
@@ -589,7 +612,7 @@ EOF
 exit(0);
 }
 EOF
-if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"`
+if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=$("$dummy")
 then
 echo "$SYSTEM_NAME"
 else
@@ -602,15 +625,15 @@ EOF
 fi
 exit ;;
 *:AIX:*:[4567])
-IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
+IBM_CPU_ID=$(/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }')
 if /usr/sbin/lsattr -El "$IBM_CPU_ID" | grep ' POWER' >/dev/null 2>&1; then
 IBM_ARCH=rs6000
 else
 IBM_ARCH=powerpc
 fi
-if [ -x /usr/bin/lslpp ] ; then
-IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc |
-awk -F: '{ print $3 }' | sed s/[0-9]*$/0/`
+if test -x /usr/bin/lslpp ; then
+IBM_REV=$(/usr/bin/lslpp -Lqc bos.rte.libc |
+awk -F: '{ print $3 }' | sed s/[0-9]*$/0/)
 else
 IBM_REV="$UNAME_VERSION.$UNAME_RELEASE"
 fi
@@ -638,14 +661,14 @@ EOF
 echo m68k-hp-bsd4.4
 exit ;;
 9000/[34678]??:HP-UX:*:*)
-HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'`
+HPUX_REV=$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//')
 case "$UNAME_MACHINE" in
 9000/31?) HP_ARCH=m68000 ;;
 9000/[34]??) HP_ARCH=m68k ;;
 9000/[678][0-9][0-9])
-if [ -x /usr/bin/getconf ]; then
-sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
-sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+if test -x /usr/bin/getconf; then
+sc_cpu_version=$(/usr/bin/getconf SC_CPU_VERSION 2>/dev/null)
+sc_kernel_bits=$(/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null)
 case "$sc_cpu_version" in
 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0
 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1
@@ -657,7 +680,7 @@ EOF
 esac ;;
 esac
 fi
-if [ "$HP_ARCH" = "" ]; then
+if test "$HP_ARCH" = ""; then
 set_cc_for_build
 sed 's/^ //' << EOF > "$dummy.c"

@@ -692,11 +715,11 @@ EOF
 exit (0);
 }
 EOF
-(CCOPTS="" $CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null) && HP_ARCH=`"$dummy"`
+(CCOPTS="" $CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null) && HP_ARCH=$("$dummy")
 test -z "$HP_ARCH" && HP_ARCH=hppa
 fi ;;
 esac
-if [ "$HP_ARCH" = hppa2.0w ]
+if test "$HP_ARCH" = hppa2.0w
 then
 set_cc_for_build

@@ -720,7 +743,7 @@ EOF
 echo "$HP_ARCH"-hp-hpux"$HPUX_REV"
 exit ;;
 ia64:HP-UX:*:*)
-HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'`
+HPUX_REV=$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//')
 echo ia64-hp-hpux"$HPUX_REV"
 exit ;;
 3050*:HI-UX:*:*)
@@ -750,7 +773,7 @@ EOF
 exit (0);
 }
 EOF
-$CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` &&
+$CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=$("$dummy") &&
 { echo "$SYSTEM_NAME"; exit; }
 echo unknown-hitachi-hiuxwe2
 exit ;;
@@ -770,7 +793,7 @@ EOF
 echo hppa1.0-hp-osf
 exit ;;
 i*86:OSF1:*:*)
-if [ -x /usr/sbin/sysversion ] ; then
+if test -x /usr/sbin/sysversion ; then
 echo "$UNAME_MACHINE"-unknown-osf1mk
 else
 echo "$UNAME_MACHINE"-unknown-osf1
@@ -819,14 +842,14 @@ EOF
 echo craynv-cray-unicosmp"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
 exit ;;
 F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
-FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`
-FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'`
-FUJITSU_REL=`echo "$UNAME_RELEASE" | sed -e 's/ /_/'`
+FUJITSU_PROC=$(uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz)
+FUJITSU_SYS=$(uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///')
+FUJITSU_REL=$(echo "$UNAME_RELEASE" | sed -e 's/ /_/')
 echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
 exit ;;
 5000:UNIX_System_V:4.*:*)
-FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'`
-FUJITSU_REL=`echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'`
+FUJITSU_SYS=$(uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///')
+FUJITSU_REL=$(echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/')
 echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
 exit ;;
 i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
@@ -838,26 +861,26 @@ EOF
 *:BSD/OS:*:*)
 echo "$UNAME_MACHINE"-unknown-bsdi"$UNAME_RELEASE"
 exit ;;
-arm*:FreeBSD:*:*)
-UNAME_PROCESSOR=`uname -p`
+arm:FreeBSD:*:*)
+UNAME_PROCESSOR=$(uname -p)
 set_cc_for_build
 if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
 | grep -q __ARM_PCS_VFP
 then
-echo "${UNAME_PROCESSOR}"-unknown-freebsd"`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`"-gnueabi
+echo "${UNAME_PROCESSOR}"-unknown-freebsd"$(echo ${UNAME_RELEASE}|sed -e 's/[-(].*//')"-gnueabi
 else
-echo "${UNAME_PROCESSOR}"-unknown-freebsd"`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`"-gnueabihf
+echo "${UNAME_PROCESSOR}"-unknown-freebsd"$(echo ${UNAME_RELEASE}|sed -e 's/[-(].*//')"-gnueabihf
 fi
 exit ;;
 *:FreeBSD:*:*)
-UNAME_PROCESSOR=`/usr/bin/uname -p`
+UNAME_PROCESSOR=$(/usr/bin/uname -p)
 case "$UNAME_PROCESSOR" in
 amd64)
 UNAME_PROCESSOR=x86_64 ;;
 i386)
 UNAME_PROCESSOR=i586 ;;
 esac
-echo "$UNAME_PROCESSOR"-unknown-freebsd"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`"
+echo "$UNAME_PROCESSOR"-unknown-freebsd"$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')"
 exit ;;
 i*:CYGWIN*:*)
 echo "$UNAME_MACHINE"-pc-cygwin
@@ -890,18 +913,18 @@ EOF
 echo "$UNAME_MACHINE"-pc-uwin
 exit ;;
 amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
-echo x86_64-unknown-cygwin
+echo x86_64-pc-cygwin
 exit ;;
 prep*:SunOS:5.*:*)
-echo powerpcle-unknown-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`"
+echo powerpcle-unknown-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')"
 exit ;;
 *:GNU:*:*)
 # the GNU system
-echo "`echo "$UNAME_MACHINE"|sed -e 's,[-/].*$,,'`-unknown-$LIBC`echo "$UNAME_RELEASE"|sed -e 's,/.*$,,'`"
+echo "$(echo "$UNAME_MACHINE"|sed -e 's,[-/].*$,,')-unknown-$LIBC$(echo "$UNAME_RELEASE"|sed -e 's,/.*$,,')"
 exit ;;
 *:GNU/*:*:*)
 # other systems with GNU libc and userland
-echo "$UNAME_MACHINE-unknown-`echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`-$LIBC"
+echo "$UNAME_MACHINE-unknown-$(echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]")$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')-$LIBC"
 exit ;;
 *:Minix:*:*)
 echo "$UNAME_MACHINE"-unknown-minix
@@ -914,7 +937,7 @@ EOF
 echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 exit ;;
 alpha:Linux:*:*)
-case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+case $(sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' /proc/cpuinfo 2>/dev/null) in
 EV5) UNAME_MACHINE=alphaev5 ;;
 EV56) UNAME_MACHINE=alphaev56 ;;
 PCA56) UNAME_MACHINE=alphapca56 ;;
@@ -981,22 +1004,50 @@ EOF
 exit ;;
 mips:Linux:*:* | mips64:Linux:*:*)
 set_cc_for_build
+IS_GLIBC=0
+test x"${LIBC}" = xgnu && IS_GLIBC=1
 sed 's/^ //' << EOF > "$dummy.c"
 #undef CPU
-#undef ${UNAME_MACHINE}
-#undef ${UNAME_MACHINE}el
+#undef mips
+#undef mipsel
+#undef mips64
+#undef mips64el
+#if ${IS_GLIBC} && defined(_ABI64)
+LIBCABI=gnuabi64
+#else
+#if ${IS_GLIBC} && defined(_ABIN32)
+LIBCABI=gnuabin32
+#else
+LIBCABI=${LIBC}
+#endif
+#endif
+
+#if ${IS_GLIBC} && defined(__mips64) && defined(__mips_isa_rev) && __mips_isa_rev>=6
+CPU=mipsisa64r6
+#else
+#if ${IS_GLIBC} && !defined(__mips64) && defined(__mips_isa_rev) && __mips_isa_rev>=6
+CPU=mipsisa32r6
+#else
+#if defined(__mips64)
+CPU=mips64
+#else
+CPU=mips
+#endif
+#endif
+#endif
+
 #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
-CPU=${UNAME_MACHINE}el
+MIPS_ENDIAN=el
 #else
 #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
-CPU=${UNAME_MACHINE}
+MIPS_ENDIAN=
 #else
-CPU=
+MIPS_ENDIAN=
 #endif
 #endif
 EOF
-eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU'`"
-test "x$CPU" != x && { echo "$CPU-unknown-linux-$LIBC"; exit; }
+eval "$($CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU\|^MIPS_ENDIAN\|^LIBCABI')"
+test "x$CPU" != x && { echo "$CPU${MIPS_ENDIAN}-unknown-linux-$LIBCABI"; exit; }
 ;;
 mips64el:Linux:*:*)
 echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
@@ -1015,7 +1066,7 @@ EOF
 exit ;;
 parisc:Linux:*:* | hppa:Linux:*:*)
 # Look for CPU level
-case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
+case $(grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2) in
 PA7*) echo hppa1.1-unknown-linux-"$LIBC" ;;
 PA8*) echo hppa2.0-unknown-linux-"$LIBC" ;;
 *) echo hppa-unknown-linux-"$LIBC" ;;
@@ -1055,7 +1106,17 @@ EOF
 echo "$UNAME_MACHINE"-dec-linux-"$LIBC"
 exit ;;
 x86_64:Linux:*:*)
-echo "$UNAME_MACHINE"-pc-linux-"$LIBC"
+set_cc_for_build
+LIBCABI=$LIBC
+if test "$CC_FOR_BUILD" != no_compiler_found; then
+if (echo '#ifdef __ILP32__'; echo IS_X32; echo '#endif') | \
+(CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
+grep IS_X32 >/dev/null
+then
+LIBCABI="$LIBC"x32
+fi
+fi
+echo "$UNAME_MACHINE"-pc-linux-"$LIBCABI"
 exit ;;
 xtensa*:Linux:*:*)
 echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
@@ -1095,7 +1156,7 @@ EOF
 echo "$UNAME_MACHINE"-pc-msdosdjgpp
 exit ;;
 i*86:*:4.*:*)
-UNAME_REL=`echo "$UNAME_RELEASE" | sed 's/\/MP$//'`
+UNAME_REL=$(echo "$UNAME_RELEASE" | sed 's/\/MP$//')
 if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
 echo "$UNAME_MACHINE"-univel-sysv"$UNAME_REL"
 else
@@ -1104,19 +1165,19 @@ EOF
 exit ;;
 i*86:*:5:[678]*)
 # UnixWare 7.x, OpenUNIX and OpenServer 6.
-case `/bin/uname -X | grep "^Machine"` in
+case $(/bin/uname -X | grep "^Machine") in
 *486*) UNAME_MACHINE=i486 ;;
 *Pentium) UNAME_MACHINE=i586 ;;
 *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
 esac
-echo "$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}{$UNAME_VERSION}"
+echo "$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}"
 exit ;;
 i*86:*:3.2:*)
 if test -f /usr/options/cb.name; then
-UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+UNAME_REL=$(sed -n 's/.*Version //p' </usr/options/cb.name)
 echo "$UNAME_MACHINE"-pc-isc"$UNAME_REL"
 elif /bin/uname -X 2>/dev/null >/dev/null ; then
-UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
+UNAME_REL=$( (/bin/uname -X|grep Release|sed -e 's/.*= //'))
 (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
 (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
 && UNAME_MACHINE=i586
@@ -1166,7 +1227,7 @@ EOF
 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
 OS_REL=''
 test -r /etc/.relid \
-&& OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+&& OS_REL=.$(sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid)
 /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
 && { echo i486-ncr-sysv4.3"$OS_REL"; exit; }
 /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
@@ -1177,7 +1238,7 @@ EOF
 NCR*:*:4.2:* | MPRAS*:*:4.2:*)
 OS_REL='.3'
 test -r /etc/.relid \
-&& OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+&& OS_REL=.$(sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid)
 /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
 && { echo i486-ncr-sysv4.3"$OS_REL"; exit; }
 /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
@@ -1210,7 +1271,7 @@ EOF
 exit ;;
 *:SINIX-*:*:*)
 if uname -p 2>/dev/null >/dev/null ; then
-UNAME_MACHINE=`(uname -p) 2>/dev/null`
+UNAME_MACHINE=$( (uname -p) 2>/dev/null)
 echo "$UNAME_MACHINE"-sni-sysv4
 else
 echo ns32k-sni-sysv
@@ -1244,7 +1305,7 @@ EOF
 echo mips-sony-newsos6
 exit ;;
 R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
-if [ -d /usr/nec ]; then
+if test -d /usr/nec; then
 echo mips-nec-sysv"$UNAME_RELEASE"
 else
 echo mips-unknown-sysv"$UNAME_RELEASE"
@@ -1292,14 +1353,24 @@ EOF
 *:Rhapsody:*:*)
 echo "$UNAME_MACHINE"-apple-rhapsody"$UNAME_RELEASE"
 exit ;;
+arm64:Darwin:*:*)
+echo aarch64-apple-darwin"$UNAME_RELEASE"
+exit ;;
 *:Darwin:*:*)
-UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
+UNAME_PROCESSOR=$(uname -p)
+case $UNAME_PROCESSOR in
+unknown) UNAME_PROCESSOR=powerpc ;;
+esac
+if command -v xcode-select > /dev/null 2> /dev/null && \
+! xcode-select --print-path > /dev/null 2> /dev/null ; then
+# Avoid executing cc if there is no toolchain installed as
+# cc will be a stub that puts up a graphical alert
+# prompting the user to install developer tools.
+CC_FOR_BUILD=no_compiler_found
+else
 set_cc_for_build
-if test "$UNAME_PROCESSOR" = unknown ; then
-UNAME_PROCESSOR=powerpc
 fi
-if test "`echo "$UNAME_RELEASE" | sed -e 's/\..*//'`" -le 10 ; then
-if [ "$CC_FOR_BUILD" != no_compiler_found ]; then
+if test "$CC_FOR_BUILD" != no_compiler_found; then
 if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
 (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
 grep IS_64BIT_ARCH >/dev/null
@@ -1316,20 +1387,14 @@ EOF
 then
 UNAME_PROCESSOR=powerpc
 fi
-fi
 elif test "$UNAME_PROCESSOR" = i386 ; then
-# Avoid executing cc on OS X 10.9, as it ships with a stub
-# that puts up a graphical alert prompting to install
-# developer tools. Any system running Mac OS X 10.7 or
-# later (Darwin 11 and later) is required to have a 64-bit
-# processor. This is not true of the ARM version of Darwin
-# that Apple uses in portable devices.
-UNAME_PROCESSOR=x86_64
+# uname -m returns i386 or x86_64
+UNAME_PROCESSOR=$UNAME_MACHINE
 fi
 echo "$UNAME_PROCESSOR"-apple-darwin"$UNAME_RELEASE"
 exit ;;
 *:procnto*:*:* | *:QNX:[0123456789]*:*)
-UNAME_PROCESSOR=`uname -p`
+UNAME_PROCESSOR=$(uname -p)
 if test "$UNAME_PROCESSOR" = x86; then
 UNAME_PROCESSOR=i386
 UNAME_MACHINE=pc
@@ -1397,10 +1462,10 @@ EOF
 echo mips-sei-seiux"$UNAME_RELEASE"
 exit ;;
 *:DragonFly:*:*)
-echo "$UNAME_MACHINE"-unknown-dragonfly"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`"
+echo "$UNAME_MACHINE"-unknown-dragonfly"$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')"
 exit ;;
 *:*VMS:*:*)
-UNAME_MACHINE=`(uname -p) 2>/dev/null`
+UNAME_MACHINE=$( (uname -p) 2>/dev/null)
 case "$UNAME_MACHINE" in
 A*) echo alpha-dec-vms ; exit ;;
 I*) echo ia64-dec-vms ; exit ;;
@@ -1410,7 +1475,7 @@ EOF
 echo i386-pc-xenix
 exit ;;
 i*86:skyos:*:*)
-echo "$UNAME_MACHINE"-pc-skyos"`echo "$UNAME_RELEASE" | sed -e 's/ .*$//'`"
+echo "$UNAME_MACHINE"-pc-skyos"$(echo "$UNAME_RELEASE" | sed -e 's/ .*$//')"
 exit ;;
 i*86:rdos:*:*)
 echo "$UNAME_MACHINE"-pc-rdos
@@ -1424,8 +1489,148 @@ EOF
 amd64:Isilon\ OneFS:*:*)
 echo x86_64-unknown-onefs
 exit ;;
+*:Unleashed:*:*)
+echo "$UNAME_MACHINE"-unknown-unleashed"$UNAME_RELEASE"
+exit ;;
 esac

+# No uname command or uname output not recognized.
+set_cc_for_build
+cat > "$dummy.c" <<EOF
+#ifdef _SEQUENT_
+#include <sys/types.h>
+#include <sys/utsname.h>
+#endif
+#if defined(ultrix) || defined(_ultrix) || defined(__ultrix) || defined(__ultrix__)
+#if defined (vax) || defined (__vax) || defined (__vax__) || defined(mips) || defined(__mips) || defined(__mips__) || defined(MIPS) || defined(__MIPS__)
+#include <signal.h>
+#if defined(_SIZE_T_) || defined(SIGLOST)
+#include <sys/utsname.h>
+#endif
+#endif
+#endif
+main ()
+{
+#if defined (sony)
+#if defined (MIPSEB)
+/* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
+I don't know.... */
+printf ("mips-sony-bsd\n"); exit (0);
+#else
+#include <sys/param.h>
+printf ("m68k-sony-newsos%s\n",
+#ifdef NEWSOS4
+"4"
+#else
+""
+#endif
+); exit (0);
+#endif
+#endif
+
+#if defined (NeXT)
+#if !defined (__ARCHITECTURE__)
+#define __ARCHITECTURE__ "m68k"
+#endif
+int version;
+version=$( (hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null);
+if (version < 4)
+printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
+else
+printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
+exit (0);
+#endif
+
+#if defined (MULTIMAX) || defined (n16)
+#if defined (UMAXV)
+printf ("ns32k-encore-sysv\n"); exit (0);
+#else
+#if defined (CMU)
+printf ("ns32k-encore-mach\n"); exit (0);
+#else
+printf ("ns32k-encore-bsd\n"); exit (0);
+#endif
+#endif
+#endif
+
+#if defined (__386BSD__)
+printf ("i386-pc-bsd\n"); exit (0);
+#endif
+
+#if defined (sequent)
+#if defined (i386)
+printf ("i386-sequent-dynix\n"); exit (0);
+#endif
+#if defined (ns32000)
+printf ("ns32k-sequent-dynix\n"); exit (0);
+#endif
+#endif
+
+#if defined (_SEQUENT_)
+struct utsname un;
+
+uname(&un);
+if (strncmp(un.version, "V2", 2) == 0) {
+printf ("i386-sequent-ptx2\n"); exit (0);
+}
+if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
+printf ("i386-sequent-ptx1\n"); exit (0);
+}
+printf ("i386-sequent-ptx\n"); exit (0);
+#endif
+
+#if defined (vax)
+#if !defined (ultrix)
+#include <sys/param.h>
+#if defined (BSD)
+#if BSD == 43
+printf ("vax-dec-bsd4.3\n"); exit (0);
+#else
+#if BSD == 199006
+printf ("vax-dec-bsd4.3reno\n"); exit (0);
+#else
+printf ("vax-dec-bsd\n"); exit (0);
+#endif
+#endif
+#else
+printf ("vax-dec-bsd\n"); exit (0);
+#endif
+#else
+#if defined(_SIZE_T_) || defined(SIGLOST)
+struct utsname un;
+uname (&un);
+printf ("vax-dec-ultrix%s\n", un.release); exit (0);
+#else
+printf ("vax-dec-ultrix\n"); exit (0);
+#endif
+#endif
+#endif
+#if defined(ultrix) || defined(_ultrix) || defined(__ultrix) || defined(__ultrix__)
+#if defined(mips) || defined(__mips) || defined(__mips__) || defined(MIPS) || defined(__MIPS__)
+#if defined(_SIZE_T_) || defined(SIGLOST)
+struct utsname *un;
+uname (&un);
+printf ("mips-dec-ultrix%s\n", un.release); exit (0);
+#else
+printf ("mips-dec-ultrix\n"); exit (0);
+#endif
+#endif
+#endif
+
+#if defined (alliant) && defined (i860)
+printf ("i860-alliant-bsd\n"); exit (0);
+#endif
+
+exit (1);
+}
+EOF
+
+$CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null && SYSTEM_NAME=$($dummy) &&
+{ echo "$SYSTEM_NAME"; exit; }
+
+# Apollos put the system type in the environment.
+test -d /usr/apollo && { echo "$ISP-apollo-$SYSTYPE"; exit; }
+
 echo "$0: unable to guess system type" >&2

 case "$UNAME_MACHINE:$UNAME_SYSTEM" in
@@ -1445,9 +1650,15 @@ This script (version $timestamp), has failed to recognize the
 operating system you are using. If your script is old, overwrite *all*
 copies of config.guess and config.sub with the latest versions from:

-https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess
+https://git.savannah.gnu.org/cgit/config.git/plain/config.guess
 and
-https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub
+https://git.savannah.gnu.org/cgit/config.git/plain/config.sub
+EOF
+
+year=$(echo $timestamp | sed 's,-.*,,')
+# shellcheck disable=SC2003
+if test "$(expr "$(date +%Y)" - "$year")" -lt 3 ; then
+cat >&2 <<EOF

 If $0 has already been updated, send the following data and any
 information you think might be pertinent to config-patches@gnu.org to
@@ -1455,26 +1666,27 @@ provide the necessary information to handle your system.

 config.guess timestamp = $timestamp

-uname -m = `(uname -m) 2>/dev/null || echo unknown`
-uname -r = `(uname -r) 2>/dev/null || echo unknown`
-uname -s = `(uname -s) 2>/dev/null || echo unknown`
-uname -v = `(uname -v) 2>/dev/null || echo unknown`
+uname -m = $( (uname -m) 2>/dev/null || echo unknown)
+uname -r = $( (uname -r) 2>/dev/null || echo unknown)
+uname -s = $( (uname -s) 2>/dev/null || echo unknown)
+uname -v = $( (uname -v) 2>/dev/null || echo unknown)

-/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
-/bin/uname -X = `(/bin/uname -X) 2>/dev/null`
+/usr/bin/uname -p = $( (/usr/bin/uname -p) 2>/dev/null)
+/bin/uname -X = $( (/bin/uname -X) 2>/dev/null)

-hostinfo = `(hostinfo) 2>/dev/null`
-/bin/universe = `(/bin/universe) 2>/dev/null`
-/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null`
-/bin/arch = `(/bin/arch) 2>/dev/null`
-/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
-/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
+hostinfo = $( (hostinfo) 2>/dev/null)
+/bin/universe = $( (/bin/universe) 2>/dev/null)
+/usr/bin/arch -k = $( (/usr/bin/arch -k) 2>/dev/null)
+/bin/arch = $( (/bin/arch) 2>/dev/null)
+/usr/bin/oslevel = $( (/usr/bin/oslevel) 2>/dev/null)
+/usr/convex/getsysinfo = $( (/usr/convex/getsysinfo) 2>/dev/null)

 UNAME_MACHINE = "$UNAME_MACHINE"
 UNAME_RELEASE = "$UNAME_RELEASE"
 UNAME_SYSTEM = "$UNAME_SYSTEM"
 UNAME_VERSION = "$UNAME_VERSION"
 EOF
+fi

 exit 1
config/config.sub (vendored) | 1834
File diff suppressed because it is too large
@@ -179,6 +179,10 @@ AC_CHECK_HEADERS([bzlib.h], [true],
 [AC_MSG_ERROR([Nix requires libbz2, which is part of bzip2. See https://web.archive.org/web/20180624184756/http://www.bzip.org/.])])
 # Checks for libarchive
 PKG_CHECK_MODULES([LIBARCHIVE], [libarchive >= 3.1.2], [CXXFLAGS="$LIBARCHIVE_CFLAGS $CXXFLAGS"])
+# Workaround until https://github.com/libarchive/libarchive/issues/1446 is fixed
+if test "$shared" != yes; then
+LIBARCHIVE_LIBS+=' -lz'
+fi

 # Look for SQLite, a required dependency.
 PKG_CHECK_MODULES([SQLITE3], [sqlite3 >= 3.6.19], [CXXFLAGS="$SQLITE3_CFLAGS $CXXFLAGS"])
@@ -251,6 +255,7 @@ if test -n "$enable_s3"; then
 declare -a aws_version_tokens=($(printf '#include <aws/core/VersionConfig.h>\nAWS_SDK_VERSION_STRING' | $CPP $CPPFLAGS - | grep -v '^#.*' | sed 's/"//g' | tr '.' ' '))
 AC_DEFINE_UNQUOTED([AWS_VERSION_MAJOR], ${aws_version_tokens@<:@0@:>@}, [Major version of aws-sdk-cpp.])
 AC_DEFINE_UNQUOTED([AWS_VERSION_MINOR], ${aws_version_tokens@<:@1@:>@}, [Minor version of aws-sdk-cpp.])
+AC_DEFINE_UNQUOTED([AWS_VERSION_PATCH], ${aws_version_tokens@<:@2@:>@}, [Patch version of aws-sdk-cpp.])
 fi
@@ -1,6 +1,6 @@
 { system ? "" # obsolete
 , url
-, hash ? "" # an SRI ash
+, hash ? "" # an SRI hash

 # Legacy hash specification
 , md5 ? "", sha1 ? "", sha256 ? "", sha512 ? ""

File diff suppressed because it is too large
@ -1,33 +1,40 @@
|
||||||
|
command:
|
||||||
|
|
||||||
with builtins;
|
with builtins;
|
||||||
with import ./utils.nix;
|
with import ./utils.nix;
|
||||||
|
|
||||||
let
|
let
|
||||||
|
|
||||||
showCommand =
|
showCommand =
|
||||||
{ command, section, def }:
|
{ command, def, filename }:
|
||||||
"${section} Name\n\n"
|
"# Name\n\n"
|
||||||
+ "`${command}` - ${def.description}\n\n"
|
+ "`${command}` - ${def.description}\n\n"
|
||||||
+ "${section} Synopsis\n\n"
|
+ "# Synopsis\n\n"
|
||||||
+ showSynopsis { inherit command; args = def.args; }
|
+ showSynopsis { inherit command; args = def.args; }
|
||||||
+ (if def ? doc
|
+ (if def.commands or {} != {}
|
||||||
then "${section} Description\n\n" + def.doc + "\n\n"
|
then
|
||||||
else "")
|
"where *subcommand* is one of the following:\n\n"
|
||||||
+ (let s = showFlags def.flags; in
|
# FIXME: group by category
|
||||||
if s != ""
|
+ concatStrings (map (name:
|
||||||
then "${section} Flags\n\n${s}"
|
"* [`${command} ${name}`](./${appendName filename name}.md) - ${def.commands.${name}.description}\n")
|
||||||
|
(attrNames def.commands))
|
||||||
|
+ "\n"
|
||||||
else "")
|
else "")
|
||||||
+ (if def.examples or [] != []
|
+ (if def.examples or [] != []
|
||||||
then
|
then
|
||||||
"${section} Examples\n\n"
|
"# Examples\n\n"
|
||||||
+ concatStrings (map ({ description, command }: "${description}\n\n```console\n${command}\n```\n\n") def.examples)
|
+ concatStrings (map ({ description, command }: "${description}\n\n```console\n${command}\n```\n\n") def.examples)
|
||||||
else "")
|
else "")
|
||||||
+ (if def.commands or [] != []
|
+ (if def ? doc
|
||||||
then concatStrings (
|
then def.doc + "\n\n"
|
||||||
map (name:
|
else "")
|
||||||
"# Subcommand `${command} ${name}`\n\n"
|
+ (let s = showFlags def.flags; in
|
||||||
+ showCommand { command = command + " " + name; section = "##"; def = def.commands.${name}; })
|
if s != ""
|
||||||
(attrNames def.commands))
|
then "# Flags\n\n${s}"
|
||||||
else "");
|
else "")
|
||||||
|
;
|
||||||
|
|
||||||
|
appendName = filename: name: (if filename == "nix" then "nix3" else filename) + "-" + name;
|
||||||
|
|
||||||
showFlags = flags:
|
showFlags = flags:
|
||||||
concatStrings
|
concatStrings
|
||||||
|
@ -48,8 +55,20 @@ let
|
||||||
"`${command}` [*flags*...] ${concatStringsSep " "
|
"`${command}` [*flags*...] ${concatStringsSep " "
|
||||||
(map (arg: "*${arg.label}*" + (if arg ? arity then "" else "...")) args)}\n\n";
|
(map (arg: "*${arg.label}*" + (if arg ? arity then "" else "...")) args)}\n\n";
|
||||||
|
|
||||||
|
processCommand = { command, def, filename }:
|
||||||
|
[ { name = filename + ".md"; value = showCommand { inherit command def filename; }; inherit command; } ]
|
||||||
|
++ concatMap
|
||||||
|
(name: processCommand {
|
||||||
|
filename = appendName filename name;
|
||||||
|
command = command + " " + name;
|
||||||
|
def = def.commands.${name};
|
||||||
|
})
|
||||||
|
(attrNames def.commands or {});
|
||||||
|
|
||||||
in
|
in
|
||||||
|
|
||||||
command:
|
let
|
||||||
|
manpages = processCommand { filename = "nix"; command = "nix"; def = command; };
|
||||||
showCommand { command = "nix"; section = "#"; def = command; }
|
summary = concatStrings (map (manpage: " - [${manpage.command}](command-ref/new-cli/${manpage.name})\n") manpages);
|
||||||
|
in
|
||||||
|
(listToAttrs manpages) // { "SUMMARY.md" = summary; }
|
||||||
|
|
|
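For illustration: `processCommand` recurses over `def.commands` and yields one markdown page per (sub)command, with `appendName` rewriting the top-level `nix` prefix to `nix3` for subcommand files. A minimal sketch of the resulting tree, assuming hypothetical `build` and `develop` subcommands in `nix.json`:

```console
$ ls doc/manual/src/command-ref/new-cli
SUMMARY.md  nix.md  nix3-build.md  nix3-develop.md
```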
@@ -4,7 +4,7 @@ MANUAL_SRCS := $(call rwildcard, $(d)/src, *.md)
 
 # Generate man pages.
 man-pages := $(foreach n, \
-  nix-env.1 nix-build.1 nix-shell.1 nix-store.1 nix-instantiate.1 nix.1 \
+  nix-env.1 nix-build.1 nix-shell.1 nix-store.1 nix-instantiate.1 \
   nix-collect-garbage.1 \
   nix-prefetch-url.1 nix-channel.1 \
   nix-hash.1 nix-copy-closure.1 \
@@ -13,9 +13,14 @@ man-pages := $(foreach n, \
 
 clean-files += $(d)/*.1 $(d)/*.5 $(d)/*.8
 
-dist-files += $(man-pages)
+# Provide a dummy environment for nix, so that it will not access files outside the macOS sandbox.
+dummy-env = env -i \
+  HOME=/dummy \
+  NIX_CONF_DIR=/dummy \
+  NIX_SSL_CERT_FILE=/dummy/no-ca-bundle.crt \
+  NIX_STATE_DIR=/dummy
 
-nix-eval = $(bindir)/nix eval --experimental-features nix-command -I nix/corepkgs=corepkgs --store dummy:// --impure --raw --expr
+nix-eval = $(dummy-env) $(bindir)/nix eval --experimental-features nix-command -I nix/corepkgs=corepkgs --store dummy:// --impure --raw
 
 $(d)/%.1: $(d)/src/command-ref/%.md
 	@printf "Title: %s\n\n" "$$(basename $@ .1)" > $^.tmp
@@ -35,37 +40,51 @@ $(d)/nix.conf.5: $(d)/src/command-ref/conf-file.md
 	$(trace-gen) lowdown -sT man $^.tmp -o $@
 	@rm $^.tmp
 
-$(d)/src/command-ref/nix.md: $(d)/nix.json $(d)/generate-manpage.nix $(bindir)/nix
-	$(trace-gen) $(nix-eval) 'import doc/manual/generate-manpage.nix (builtins.fromJSON (builtins.readFile $<))' > $@.tmp
+$(d)/src/SUMMARY.md: $(d)/src/SUMMARY.md.in $(d)/src/command-ref/new-cli
+	$(trace-gen) cat doc/manual/src/SUMMARY.md.in | while IFS= read line; do if [[ $$line = @manpages@ ]]; then cat doc/manual/src/command-ref/new-cli/SUMMARY.md; else echo "$$line"; fi; done > $@.tmp
 	@mv $@.tmp $@
 
+$(d)/src/command-ref/new-cli: $(d)/nix.json $(d)/generate-manpage.nix $(bindir)/nix
+	@rm -rf $@
+	$(trace-gen) $(nix-eval) --write-to $@ --expr 'import doc/manual/generate-manpage.nix (builtins.fromJSON (builtins.readFile $<))'
+
 $(d)/src/command-ref/conf-file.md: $(d)/conf-file.json $(d)/generate-options.nix $(d)/src/command-ref/conf-file-prefix.md $(bindir)/nix
 	@cat doc/manual/src/command-ref/conf-file-prefix.md > $@.tmp
-	$(trace-gen) $(nix-eval) 'import doc/manual/generate-options.nix (builtins.fromJSON (builtins.readFile $<))' >> $@.tmp
+	$(trace-gen) $(nix-eval) --expr 'import doc/manual/generate-options.nix (builtins.fromJSON (builtins.readFile $<))' >> $@.tmp
 	@mv $@.tmp $@
 
 $(d)/nix.json: $(bindir)/nix
-	$(trace-gen) $(bindir)/nix __dump-args > $@.tmp
+	$(trace-gen) $(dummy-env) $(bindir)/nix __dump-args > $@.tmp
 	@mv $@.tmp $@
 
 $(d)/conf-file.json: $(bindir)/nix
-	$(trace-gen) env -i NIX_CONF_DIR=/dummy HOME=/dummy NIX_SSL_CERT_FILE=/dummy/no-ca-bundle.crt $(bindir)/nix show-config --json --experimental-features nix-command > $@.tmp
+	$(trace-gen) $(dummy-env) $(bindir)/nix show-config --json --experimental-features nix-command > $@.tmp
 	@mv $@.tmp $@
 
 $(d)/src/expressions/builtins.md: $(d)/builtins.json $(d)/generate-builtins.nix $(d)/src/expressions/builtins-prefix.md $(bindir)/nix
 	@cat doc/manual/src/expressions/builtins-prefix.md > $@.tmp
-	$(trace-gen) $(nix-eval) 'import doc/manual/generate-builtins.nix (builtins.fromJSON (builtins.readFile $<))' >> $@.tmp
+	$(trace-gen) $(nix-eval) --expr 'import doc/manual/generate-builtins.nix (builtins.fromJSON (builtins.readFile $<))' >> $@.tmp
 	@mv $@.tmp $@
 
 $(d)/builtins.json: $(bindir)/nix
-	$(trace-gen) NIX_PATH=nix/corepkgs=corepkgs $(bindir)/nix __dump-builtins > $@.tmp
-	mv $@.tmp $@
+	$(trace-gen) $(dummy-env) NIX_PATH=nix/corepkgs=corepkgs $(bindir)/nix __dump-builtins > $@.tmp
+	@mv $@.tmp $@
 
 # Generate the HTML manual.
 install: $(docdir)/manual/index.html
 
-$(docdir)/manual/index.html: $(MANUAL_SRCS) $(d)/book.toml $(d)/custom.css $(d)/src/command-ref/nix.md $(d)/src/command-ref/conf-file.md $(d)/src/expressions/builtins.md
-	$(trace-gen) mdbook build doc/manual -d $(docdir)/manual
+# Generate 'nix' manpages.
+install: $(d)/src/command-ref/new-cli
+	$(trace-gen) for i in doc/manual/src/command-ref/new-cli/*.md; do \
+	  name=$$(basename $$i .md); \
+	  if [[ $$name = SUMMARY ]]; then continue; fi; \
+	  printf "Title: %s\n\n" "$$name" > $$i.tmp; \
+	  cat $$i >> $$i.tmp; \
+	  lowdown -sT man $$i.tmp -o $(mandir)/man1/$$name.1; \
+	done
+
+$(docdir)/manual/index.html: $(MANUAL_SRCS) $(d)/book.toml $(d)/custom.css $(d)/src/SUMMARY.md $(d)/src/command-ref/new-cli $(d)/src/command-ref/conf-file.md $(d)/src/expressions/builtins.md
+	$(trace-gen) RUST_LOG=warn mdbook build doc/manual -d $(docdir)/manual
 	@cp doc/manual/highlight.pack.js $(docdir)/manual/highlight.js
 
 endif
@@ -62,11 +62,13 @@
   - [nix-instantiate](command-ref/nix-instantiate.md)
   - [nix-prefetch-url](command-ref/nix-prefetch-url.md)
 - [Experimental Commands](command-ref/experimental-commands.md)
-  - [nix](command-ref/nix.md)
+@manpages@
 - [Files](command-ref/files.md)
   - [nix.conf](command-ref/conf-file.md)
 - [Glossary](glossary.md)
-- [Hacking](hacking.md)
+- [Contributing](contributing/contributing.md)
+  - [Hacking](contributing/hacking.md)
+  - [CLI guideline](contributing/cli-guideline.md)
 - [Release Notes](release-notes/release-notes.md)
   - [Release 2.3 (2019-09-04)](release-notes/rl-2.3.md)
   - [Release 2.2 (2019-01-11)](release-notes/rl-2.2.md)
@@ -19,19 +19,33 @@ By default Nix reads settings from the following places:
   and `XDG_CONFIG_HOME`. If these are unset, it will look in
   `$HOME/.config/nix.conf`.
 
-The configuration files consist of `name =
-value` pairs, one per line. Other files can be included with a line like
-`include
-path`, where *path* is interpreted relative to the current conf file and
-a missing file is an error unless `!include` is used instead. Comments
+- If `NIX_CONFIG` is set, its contents is treated as the contents of
+  a configuration file.
+
+The configuration files consist of `name = value` pairs, one per
+line. Other files can be included with a line like `include path`,
+where *path* is interpreted relative to the current conf file and a
+missing file is an error unless `!include` is used instead. Comments
 start with a `#` character. Here is an example configuration file:
 
     keep-outputs = true # Nice for developers
     keep-derivations = true # Idem
 
-You can override settings on the command line using the `--option` flag,
-e.g. `--option keep-outputs
-false`.
+You can override settings on the command line using the `--option`
+flag, e.g. `--option keep-outputs false`. Every configuration setting
+also has a corresponding command line flag, e.g. `--max-jobs 16`; for
+Boolean settings, there are two flags to enable or disable the setting
+(e.g. `--keep-failed` and `--no-keep-failed`).
+
+A configuration setting usually overrides any previous value. However,
+you can prefix the name of the setting by `extra-` to *append* to the
+previous value. For instance,
+
+    substituters = a b
+    extra-substituters = c d
+
+defines the `substituters` setting to be `a b c d`. This is also
+available as a command line flag (e.g. `--extra-substituters`).
 
 The following settings are currently available:
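For illustration, a minimal sketch of the `extra-` mechanism in flag form, assuming `https://cache.example.org` is a placeholder substituter URL and `substituters` is already set in `nix.conf`:

```console
# Append to, rather than override, the configured substituters
$ nix-build --extra-substituters https://cache.example.org default.nix
```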
@@ -81,6 +81,11 @@ Most Nix commands interpret the following environment variables:
   Overrides the location of the system Nix configuration directory
   (default `prefix/etc/nix`).
 
+- `NIX_CONFIG`
+  Applies settings from Nix configuration from the environment.
+  The content is treated as if it was read from a Nix configuration file.
+  Settings are separated by the newline character.
+
 - `NIX_USER_CONF_FILES`
   Overrides the location of the user Nix configuration files to load
   from (defaults to the XDG spec locations). The variable is treated
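For illustration, a minimal sketch of `NIX_CONFIG` (settings are newline-separated, supplied here via Bash `$'...'` quoting; output abridged):

```console
$ NIX_CONFIG=$'keep-outputs = true\nkeep-derivations = true' \
    nix show-config --experimental-features nix-command | grep keep-outputs
keep-outputs = true
```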
@@ -45,7 +45,7 @@ md5sum`.
 
 - `--type` *hashAlgo*
   Use the specified cryptographic hash algorithm, which can be one of
-  `md5`, `sha1`, and `sha256`.
+  `md5`, `sha1`, `sha256`, and `sha512`.
 
 - `--to-base16`
   Don’t hash anything, but convert the base-32 hash representation
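For illustration, a minimal sketch of the newly documented algorithm (`example.txt` is a placeholder file; this prints its SHA-512 hash in hexadecimal):

```console
$ nix-hash --flat --type sha512 example.txt
```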
@@ -39,7 +39,7 @@ Nix store is also printed.
 
 - `--type` *hashAlgo*
   Use the specified cryptographic hash algorithm, which can be one of
-  `md5`, `sha1`, and `sha256`.
+  `md5`, `sha1`, `sha256`, and `sha512`.
 
 - `--print-path`
   Print the store path of the downloaded file on standard output.
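For illustration, a minimal sketch combining the two flags documented here (`https://example.org/source.tar.gz` is a placeholder URL):

```console
$ nix-prefetch-url --type sha512 --print-path https://example.org/source.tar.gz
```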
@@ -32,7 +32,7 @@ URL of a tarball that will be downloaded and unpacked to a temporary
 location. The tarball must include a single top-level directory
 containing at least a file named `default.nix`.
 
-If the derivation defines the variable `shellHook`, it will be evaluated
+If the derivation defines the variable `shellHook`, it will be run
 after `$stdenv/setup` has been sourced. Since this hook is not executed
 by regular Nix builds, it allows you to perform initialisation specific
 to `nix-shell`. For example, the derivation attribute
@@ -41,10 +41,12 @@ to `nix-shell`. For example, the derivation attribute
     shellHook =
       ''
         echo "Hello shell"
+        export SOME_API_TOKEN="$(cat ~/.config/some-app/api-token)"
       '';
 ```
 
-will cause `nix-shell` to print `Hello shell`.
+will cause `nix-shell` to print `Hello shell` and set the `SOME_API_TOKEN`
+environment variable to a user-configured value.
 
 # Options
 
@@ -76,8 +78,8 @@ All options not listed here are passed to `nix-store
   cleared before the interactive shell is started, so you get an
   environment that more closely corresponds to the “real” Nix build. A
   few variables, in particular `HOME`, `USER` and `DISPLAY`, are
-  retained. Note that `~/.bashrc` and (depending on your Bash
-  installation) `/etc/bashrc` are still sourced, so any variables set
+  retained. Note that (depending on your Bash
+  installation) `/etc/bashrc` is still sourced, so any variables set
   there will affect the interactive shell.
 
 - `--packages` / `-p` *packages*…
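For illustration, a minimal sketch of the `--pure` behaviour described above, assuming GNU `hello` is available from your channels (retained variables such as `HOME` survive; others, like `EDITOR` here, are cleared):

```console
$ EDITOR=vim nix-shell --pure -p hello --run 'echo "EDITOR=$EDITOR"; hello'
EDITOR=
Hello, world!
```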
589	doc/manual/src/contributing/cli-guideline.md	Normal file
@@ -0,0 +1,589 @@
# CLI guideline

## Goals

The purpose of this document is to provide a clear direction to **help design a
delightful command line** experience. This document contains guidelines to
follow to ensure a consistent and approachable user experience.

## Overview

The `nix` command provides a single entry point to a number of sub-commands
that help **developers and system administrators** in the life-cycle of a
software project. We particularly need to pay special attention to helping and
assisting new users of Nix.

# Naming the `COMMANDS`

Words matter. Naming is an important part of usability. Users will be
interacting with Nix on a regular basis so we should **name things for ease of
understanding**.

We recommend following the [Principle of Least
Astonishment](https://en.wikipedia.org/wiki/Principle_of_least_astonishment).
This means that you should **never use acronyms or abbreviations** unless they
are commonly used in other tools (e.g. `nix init`). And if the command name is
too long (> 10-12 characters) then shortening it makes sense (e.g.
“prioritization” → “priority”).

Commands should **follow a noun-verb dialogue**. Although noun-verb formatting
seems backwards from a speaking perspective (i.e. `nix store copy` vs. `nix
copy store`) it allows us to organize commands the same way users think about
completing an action (the group first, then the command).

## Naming rules

Rules are there to guide you by limiting your options. But not everything can
fit the rules all the time. In those cases document the exceptions in [Appendix
1: Commands naming exceptions](#appendix-1-commands-naming-exceptions) and
provide a reason. The rules are meant to force a Nix developer to look, not
just at the command at hand, but also at the command in its full context
alongside other `nix` commands.

```shell
$ nix [<GROUP>] <COMMAND> [<ARGUMENTS>] [<OPTIONS>]
```

- `GROUP`, `COMMAND`, `ARGUMENTS` and `OPTIONS` should be lowercase and in a
  singular form.
- `GROUP` should be a **NOUN**.
- `COMMAND` should be a **VERB**.
- `ARGUMENTS` and `OPTIONS` are discussed in the [*Input* section](#input).

## Classification

Some commands are more important, some less. While we want all of our commands
to be perfect we can only spend a limited amount of time testing and improving
them.

This classification tries to separate commands into 3 categories in terms of
their importance to new users, who are likely to be impacted the most by a bad
user experience.

- **Main commands**

  Commands used for our main use cases and most likely used by new users. We
  expect attention to details, such as:

  - Proper use of [colors](#colors), [emojis](#special-unicode-characters)
    and [aligning of text](#text-alignment).
  - [Autocomplete](#shell-completion) of options.
  - Show [next possible steps](#next-steps).
  - Showing some [“tips”](#educate-the-user) when running long-running tasks
    (e.g. building / downloading) in order to teach users interesting bits of
    the Nix ecosystem.
  - [Help pages](#help-is-essential) to be as good as we can write them,
    pointing to external documentation and tutorials for more.

  Examples of such commands: `nix init`, `nix develop`, `nix build`, `nix run`,
  ...

- **Infrequently used commands**

  From infrequently used commands we expect less attention to details, but
  still some:

  - Proper use of [colors](#colors), [emojis](#special-unicode-characters)
    and [aligning of text](#text-alignment).
  - [Autocomplete](#shell-completion) of options.

  Examples of such commands: `nix doctor`, `nix edit`, `nix eval`, ...

- **Utility and scripting commands**

  Commands that expose certain internal functionality of `nix`, mostly used by
  other scripts.

  - [Autocomplete](#shell-completion) of options.

  Examples of such commands: `nix store copy`, `nix hash base16`, `nix store
  ping`, ...

# Help is essential

Help should be built into your command line so that new users can gradually
discover new features when they need them.

## Looking for help

Since there is no standard way users look for help, we rely on the ways help is
provided by commonly used tools. As a guide for this we took `git`, and
whenever in doubt look at it as the preferred direction.

The rules are:

- Help is shown by using `--help` or the `help` command (e.g. `nix --help` or
  `nix help`).
- For non-COMMANDs (e.g. `nix --help` and `nix store --help`) we **show
  a summary** of the most common use cases. The summary is presented on STDOUT
  without any use of a PAGER.
- For COMMANDs (e.g. `nix init --help` or `nix help init`) we display the
  man page of that command. By default the PAGER is used (as in `git`).
- At the end of either the summary or the man page there should be a URL
  pointing to an online version of more detailed documentation.
- The structure of summaries and man pages should be the same as in `git`.

## Anticipate where help is needed

Even better than requiring the user to search for help is to anticipate and
predict when the user might need it. Either because of a lack of
discoverability, a typo in the input, or simply to take the opportunity to
teach the user about interesting - but less visible - details.

### Shell completion

This type of help is most common and almost expected by users. We need to
**provide the best shell completion** for `bash`, `zsh` and `fish`.

Completion needs to be **context-aware**; this means that when a user types:

```shell
$ nix build n<TAB>
```

we need to display a list of flakes starting with `n`.

### Wrong input

As we all know, we humans make mistakes, all the time. When a typo -
intentional or unintentional - is made, we should prompt with the closest
possible options or point to documentation that would educate the user not to
make the same errors. Here are a few examples:

In the first example we prompt the user after a mistyped command name:

```shell
$ nix int
------------------------------------------------------------------------
  Error! Command `int` not found.
------------------------------------------------------------------------
  Did you mean:
    |> nix init
    |> nix input
```

Sometimes users will make a mistake either because of a typo or simply because
of a lack of discoverability. Our handling of these cases needs to be
context-sensitive.

```shell
$ nix init --template=template#pyton
------------------------------------------------------------------------
  Error! Template `template#pyton` not found.
------------------------------------------------------------------------
Initializing Nix project at `/path/to/here`.
      Select a template for your new project:
          |> template#pyton
             template#python-pip
             template#python-poetry
```

### Next steps

It can be invaluable to newcomers to show possible next steps and the usual
development workflow with Nix. For example:

```shell
$ nix init --template=template#python
Initializing project `template#python`
          in `/home/USER/dev/new-project`

  Next steps
    |> nix develop -- to enter development environment
    |> nix build -- to build your project
```

### Educate the user

We should take any opportunity to **educate users**, but at the same time we
must **be very, very careful to not annoy users**. There is a thin line between
being helpful and being annoying.

An example of educating users might be to provide *Tips* in places where they
are waiting.

```shell
$ nix build
    Started building my-project 1.2.3
 Downloaded python3.8-poetry 1.2.3 in 5.3 seconds
 Downloaded python3.8-requests 1.2.3 in 5.3 seconds
------------------------------------------------------------------------
      Press `v` to increase logs verbosity
         |> `?` to see other options
------------------------------------------------------------------------
      Learn something new with every build...
         |> See last logs of a build with `nix log --last` command.
------------------------------------------------------------------------
  Evaluated my-project 1.2.3 in 14.43 seconds
Downloading [12 / 200]
         |> firefox 1.2.3 [#########> ] 10Mb/s | 2min left
   Building [2 / 20]
         |> glibc 1.2.3 -> buildPhase: <last log line>
------------------------------------------------------------------------
```

The **Learn** part of the output is where you educate users. You should only
show it when you know that a build will take some time, and not annoy users of
builds that take only a few seconds.

Every feature like this should go through intensive review and testing to
collect as much feedback as possible and to fine-tune every little detail. If
done right this can be an awesome feature that beginners and advanced users
will love, but if not done perfectly it will annoy users and leave a bad
impression.

# Input

Input to a command is provided via `ARGUMENTS` and `OPTIONS`.

`ARGUMENTS` represent a required input for a function. When choosing to use
`ARGUMENTS` over `OPTIONS`, please be aware of the downsides that come with it:

- The user will need to remember the order of `ARGUMENTS`. This is not a
  problem if there is only one `ARGUMENT`.
- With `OPTIONS` it is possible to provide much better auto completion.
- With `OPTIONS` it is possible to provide much better error messages.
- Using `OPTIONS` means a little bit more typing.

We don’t discourage the use of `ARGUMENTS`, but simply want to make every
developer consider the downsides and choose wisely, as the sketch below
illustrates.
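A hypothetical sketch of the trade-off (`nix store copy` is one of the commands named above, but the `--from`/`--to` flags are made up for illustration):

```shell
# Arguments: terse, but their order must be remembered
$ nix store copy ./result /tmp/cache

# Options: a little more typing, but self-describing and order-independent
$ nix store copy --from ./result --to /tmp/cache
```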
|
## Naming the `OPTIONS`
|
||||||
|
|
||||||
|
Then only naming convention - apart from the ones mentioned in Naming the
|
||||||
|
`COMMANDS` section is how flags are named.
|
||||||
|
|
||||||
|
Flags are a type of `OPTION` that represent an option that can be turned ON of
|
||||||
|
OFF. We can say **flags are boolean type of** `**OPTION**`.
|
||||||
|
|
||||||
|
Here are few examples of flag `OPTIONS`:
|
||||||
|
|
||||||
|
- `--colors` vs. `--no-colors` (showing colors in the output)
|
||||||
|
- `--emojis` vs. `--no-emojis` (showing emojis in the output)
|
||||||
|
|
||||||
|
## Prompt when input not provided
|
||||||
|
|
||||||
|
For *main commands* (as [per classification](#classification)) we want command
|
||||||
|
to improve the discoverability of possible input. A new user will most likely
|
||||||
|
not know which `ARGUMENTS` and `OPTIONS` are required or which values are
|
||||||
|
possible for those options.
|
||||||
|
|
||||||
|
In cases, the user might not provide the input or they provide wrong input,
|
||||||
|
rather then show the error, prompt a user with an option to find and select
|
||||||
|
correct input (see examples).
|
||||||
|
|
||||||
|
Prompting is of course not required when TTY is not attached to STDIN. This
|
||||||
|
would mean that scripts wont need to handle prompt, but rather handle errors.
|
||||||
|
|
||||||
|
A place to use prompt and provide user with interactive select
|
||||||
|
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ nix init
|
||||||
|
Initializing Nix project at `/path/to/here`.
|
||||||
|
Select a template for you new project:
|
||||||
|
|> py
|
||||||
|
template#python-pip
|
||||||
|
template#python-poetry
|
||||||
|
[ Showing 2 templates from 1345 templates ]
|
||||||
|
```
|
||||||
|
|
||||||
|
Another great place to add prompts are **confirmation dialogues for dangerous
|
||||||
|
actions**. For example when adding new substitutor via `OPTIONS` or via
|
||||||
|
`flake.nix` we should prompt - for the first time - and let user review what is
|
||||||
|
going to happen.
|
||||||
|
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ nix build --option substitutors https://cache.example.org
|
||||||
|
------------------------------------------------------------------------
|
||||||
|
Warning! A security related question need to be answered.
|
||||||
|
------------------------------------------------------------------------
|
||||||
|
The following substitutors will be used to in `my-project`:
|
||||||
|
- https://cache.example.org
|
||||||
|
|
||||||
|
Do you allow `my-project` to use above mentioned substitutors?
|
||||||
|
[y/N] |> y
|
||||||
|
```
|
||||||
|
|
||||||
|
# Output
|
||||||
|
|
||||||
|
Terminal output can be quite limiting in many ways. Which should forces us to
|
||||||
|
think about the experience even more. As with every design the output is a
|
||||||
|
compromise between being terse and being verbose, between showing help to
|
||||||
|
beginners and annoying advance users. For this it is important that we know
|
||||||
|
what are the priorities.
|
||||||
|
|
||||||
|
Nix command line should be first and foremost written with beginners in mind.
|
||||||
|
But users wont stay beginners for long and what was once useful might quickly
|
||||||
|
become annoying. There is no golden rule that we can give in this guideline
|
||||||
|
that would make it easier how to draw a line and find best compromise.
|
||||||
|
|
||||||
|
What we would encourage is to **build prototypes**, do some **user testing**
|
||||||
|
and collect **feedback**. Then repeat the cycle few times.
|
||||||
|
|
||||||
|
First design the *happy path* and only after your iron it out, continue to work
|
||||||
|
on **edge cases** (handling and displaying errors, changes of the output by
|
||||||
|
certain `OPTIONS`, etc…)
|
||||||
|
|
||||||
|
## Follow best practices
|
||||||
|
|
||||||
|
Needless to say we Nix must be a good citizen and follow best practices in
|
||||||
|
command line.
|
||||||
|
|
||||||
|
In short: **STDOUT is for output, STDERR is for (human) messaging.**
|
||||||
|
|
||||||
|
STDOUT and STDERR provide a way for you to output messages to the user while
|
||||||
|
also allowing them to redirect content to a file. For example:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ nix build > build.txt
|
||||||
|
------------------------------------------------------------------------
|
||||||
|
Error! Atrribute `bin` missing at (1:94) from string.
|
||||||
|
------------------------------------------------------------------------
|
||||||
|
|
||||||
|
1| with import <nixpkgs> { }; (pkgs.runCommandCC or pkgs.runCommand) "shell" { buildInputs = [ (surge.bin) ]; } ""
|
||||||
|
```
|
||||||
|
|
||||||
|
Because this warning is on STDERR, it doesn’t end up in the file.
|
||||||
|
|
||||||
|
But not everything on STDERR is an error though. For example, you can run `nix
|
||||||
|
build` and collect logs in a file while still seeing the progress.
|
||||||
|
|
||||||
|
```
|
||||||
|
$ nix build > build.txt
|
||||||
|
Evaluated 1234 files in 1.2 seconds
|
||||||
|
Downloaded python3.8-poetry 1.2.3 in 5.3 seconds
|
||||||
|
Downloaded python3.8-requests 1.2.3 in 5.3 seconds
|
||||||
|
------------------------------------------------------------------------
|
||||||
|
Press `v` to increase logs verbosity
|
||||||
|
|> `?` to see other options
|
||||||
|
------------------------------------------------------------------------
|
||||||
|
Learn something new with every build...
|
||||||
|
|> See last logs of a build with `nix log --last` command.
|
||||||
|
------------------------------------------------------------------------
|
||||||
|
Evaluated my-project 1.2.3 in 14.43 seconds
|
||||||
|
Downloading [12 / 200]
|
||||||
|
|> firefox 1.2.3 [#########> ] 10Mb/s | 2min left
|
||||||
|
Building [2 / 20]
|
||||||
|
|> glibc 1.2.3 -> buildPhase: <last log line>
|
||||||
|
------------------------------------------------------------------------
|
||||||
|
```
|
||||||
|
|
||||||
|
## Errors (WIP)
|
||||||
|
|
||||||
|
**TODO**: Once we have implementation for the *happy path* then we will think
|
||||||
|
how to present errors.
|
||||||
|
|
||||||
|
## Not only for humans
|
||||||
|
|
||||||
|
Terse, machine-readable output formats can also be useful but shouldn’t get in
|
||||||
|
the way of making beautiful CLI output. When needed, commands should offer a
|
||||||
|
`--json` flag to allow users to easily parse and script the CLI.
|
||||||
|
|
||||||
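A sketch, assuming the command offers `--json` as recommended (`jq` is just one possible consumer; the store path is abridged):

```shell
$ nix build --json | jq -r '.[0].outputs.out'
/nix/store/...-my-project-1.2.3
```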
When no TTY is detected on STDOUT we should remove all design elements (no
colors, no emojis and ASCII instead of Unicode symbols). The same should
happen when no TTY is detected on STDERR. We should not display the progress /
status section, but only print warnings and errors.

## Dialog with the user

CLIs don't always make it clear when an action has taken place. For every
action a user performs, your CLI should provide an equal and appropriate
reaction, clearly highlighting what just happened. For example:

```shell
$ nix build
 Downloaded python3.8-poetry 1.2.3 in 5.3 seconds
 Downloaded python3.8-requests 1.2.3 in 5.3 seconds
 ...
   Success! You have successfully built my-project.
$
```

The above command clearly states that the command completed successfully. And
in the case of `nix build`, which is a command that might take some time to
complete, it is equally important to also show that the command started.

## Text alignment

Text alignment is the number one design element that will present all of the
Nix commands as a family and not as separate tools glued together.

The format we should follow is:

```shell
$ nix COMMAND
   VERB_1 NOUN and other words
  VERB__1 NOUN and other words
       |> Some details
```

A few rules that we can extract from the above example:

- Each line should start with at least one space.
- The first word should be a VERB and must be aligned to the right.
- The second word should be a NOUN and must be aligned to the left.
- If you can not find a good VERB / NOUN pair, don’t worry; make it as
  understandable to the user as possible.
- More details for each line can be provided by the `|>` character, which
  serves as the first word when aligning the text.

Don’t forget you should also test your terminal output with colors and emojis
off (`--no-colors --no-emojis`).

## Dim / Bright

After comparing a few terminals with different color schemes we **recommend
avoiding dimmed text**. The difference from the rest of the text is very small
in many terminal and color scheme combinations. Sometimes the difference is not
even noticeable, therefore relying on it wouldn’t make much sense.

**Bright text is much better supported** across terminals and color
schemes. Most of the time the difference is perceived as if the bright text
were bold.

## Colors

Humans are already conditioned by society to attach certain meaning to certain
colors. While the meaning is not universal, a simple collection of colors is
used to represent basic emotions.

Colors that can be used in output:

- Red = error, danger, stop
- Green = success, good
- Yellow/Orange = proceed with caution, warning, in progress
- Blue/Magenta = stability, calm

While colors are nice, when the command line is used by machines (in automation
scripts) you want to remove the colors. There should be a global `--no-colors`
option that removes the colors.

## Special (Unicode) characters

Most terminals have good support for Unicode characters, and you should use
them in your output by default. But always have a backup solution that is
implemented only with ASCII characters and will be used when the `--ascii`
option is passed in. Please make sure that you also test your output without
Unicode characters.

More than showing all the different Unicode characters, it is important to
**establish a common set of characters** that we use for certain situations.

## Emojis

Emojis help channel emotions even better than text, colors and special
characters.

We recommend **keeping the set of emojis to a minimum**. This will enable each
emoji to stand out more.

As not everybody is happy about emojis, we should provide a `--no-emojis`
option to disable them. Please make sure that you also test your output without
emojis.

## Tables

All commands that list certain data can present it as some sort of table. It’s
important that each row of your output is a single ‘entry’ of data. Never
output table borders. It’s noisy and a huge pain for parsing using other tools
such as `grep`.

Be mindful of the screen width. Only show a few columns by default with the
table header; for more, the table can be manipulated by the following options,
illustrated in the sketch after this list:

- `--no-headers`: Show column headers by default but allow to hide them.
- `--columns`: Comma-separated list of column names to add.
- `--sort`: Allow sorting by column. Allow inverse and multi-column sort as well.
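A hypothetical rendering that follows these rules (the command and column names are made up for illustration; note the header row and the absence of borders):

```shell
$ nix store list --columns name,size --sort size
NAME               SIZE
hello-2.10         208.3 KiB
python3.8-poetry   1.2 MiB
```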
## Interactive output

Interactive output was selected to be able to strike a balance between
beginners and advanced users. While the default output targets beginners, it
can, with a few keystrokes, be changed into an advanced introspection tool.

### Progress

For longer-running commands we should provide an overview of progress.
This is shown best in the `nix build` example:

```shell
$ nix build
    Started building my-project 1.2.3
 Downloaded python3.8-poetry 1.2.3 in 5.3 seconds
 Downloaded python3.8-requests 1.2.3 in 5.3 seconds
------------------------------------------------------------------------
      Press `v` to increase logs verbosity
         |> `?` to see other options
------------------------------------------------------------------------
      Learn something new with every build...
         |> See last logs of a build with `nix log --last` command.
------------------------------------------------------------------------
  Evaluated my-project 1.2.3 in 14.43 seconds
Downloading [12 / 200]
         |> firefox 1.2.3 [#########> ] 10Mb/s | 2min left
   Building [2 / 20]
         |> glibc 1.2.3 -> buildPhase: <last log line>
------------------------------------------------------------------------
```

### Search

Use an `fzf`-like fuzzy search when there are multiple options to choose from.

```shell
$ nix init
Initializing Nix project at `/path/to/here`.
      Select a template for your new project:
          |> py
             template#python-pip
             template#python-poetry
             [ Showing 2 templates from 1345 templates ]
```

### Prompt

In some situations we need to prompt the user and inform them about what is
going to happen.

```shell
$ nix build --option substitutors https://cache.example.org
------------------------------------------------------------------------
  Warning! A security related question needs to be answered.
------------------------------------------------------------------------
  The following substitutors will be used in `my-project`:
    - https://cache.example.org

  Do you allow `my-project` to use the above mentioned substitutors?
  [y/N] |> y
```

## Verbosity

There are many ways that you can control verbosity.

Verbosity levels are:

- `ERROR` (level 0)
- `WARN` (level 1)
- `NOTICE` (level 2)
- `INFO` (level 3)
- `TALKATIVE` (level 4)
- `CHATTY` (level 5)
- `DEBUG` (level 6)
- `VOMIT` (level 7)

The default level that a command starts at is `ERROR`. The simplest way to
increase verbosity is by stacking the `-v` option (e.g. `-vvv == level 3 ==
INFO`). There are also two shortcuts: `--debug` to run at the `DEBUG` verbosity
level and `--quiet` to run at the `ERROR` verbosity level.
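Putting these rules together, a minimal sketch:

```shell
$ nix build            # ERROR (level 0, default)
$ nix build -v         # WARN  (level 1)
$ nix build -vvv       # INFO  (level 3)
$ nix build --debug    # DEBUG (level 6)
$ nix build --quiet    # ERROR (level 0)
```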
----------

# Appendix 1: Commands naming exceptions

`nix init` and `nix repl` are well established.
1	doc/manual/src/contributing/contributing.md	Normal file
@@ -0,0 +1 @@
# Contributing
@@ -195,7 +195,7 @@ If you are comfortable navigating these tradeoffs, you can encrypt the
 volume with something along the lines of:
 
 ```console
-alice$ diskutil apfs enableFileVault /nix -user disk
+$ diskutil apfs enableFileVault /nix -user disk
 ```
 
 ## Symlink the Nix store to a custom location
@@ -234,13 +234,13 @@ as a helpful reference if you run into trouble.
    without a reboot:
 
    ```console
-   alice$ /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -B
+   $ /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -B
    ```
 
 3. Create the new APFS volume with diskutil:
 
    ```console
-   alice$ sudo diskutil apfs addVolume diskX APFS 'Nix Store' -mountpoint /nix
+   $ sudo diskutil apfs addVolume diskX APFS 'Nix Store' -mountpoint /nix
    ```
 
 4. Using `vifs`, add the new mount to `/etc/fstab`. If it doesn't
@@ -280,10 +280,10 @@ it somewhere (e.g. in `/tmp`), and then run the script named `install`
 inside the binary tarball:
 
 ```console
-alice$ cd /tmp
-alice$ tar xfj nix-1.8-x86_64-darwin.tar.bz2
-alice$ cd nix-1.8-x86_64-darwin
-alice$ ./install
+$ cd /tmp
+$ tar xfj nix-1.8-x86_64-darwin.tar.bz2
+$ cd nix-1.8-x86_64-darwin
+$ ./install
 ```
 
 If you need to edit the multi-user installation script to use different
@@ -18,16 +18,16 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1591633336,
-        "narHash": "sha256-oVXv4xAnDJB03LvZGbC72vSVlIbbJr8tpjEW5o/Fdek=",
+        "lastModified": 1602702596,
+        "narHash": "sha256-fqJ4UgOb4ZUnCDIapDb4gCrtAah5Rnr2/At3IzMitig=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "70717a337f7ae4e486ba71a500367cad697e5f09",
+        "rev": "ad0d20345219790533ebe06571f82ed6b034db31",
         "type": "github"
       },
       "original": {
         "id": "nixpkgs",
-        "ref": "nixos-20.03-small",
+        "ref": "nixos-20.09-small",
         "type": "indirect"
       }
     },
179	flake.nix
@@ -1,7 +1,7 @@
 {
   description = "The purely functional package manager";
 
-  inputs.nixpkgs.url = "nixpkgs/nixos-20.03-small";
+  inputs.nixpkgs.url = "nixpkgs/nixos-20.09-small";
   inputs.lowdown-src = { url = "github:kristapsdz/lowdown"; flake = false; };
 
   outputs = { self, nixpkgs, lowdown-src }:
@@ -12,11 +12,13 @@
       versionSuffix =
         if officialRelease
         then ""
-        else "pre${builtins.substring 0 8 (self.lastModifiedDate or self.lastModified)}_${self.shortRev or "dirty"}";
+        else "pre${builtins.substring 0 8 (self.lastModifiedDate or self.lastModified or "19700101")}_${self.shortRev or "dirty"}";
 
       officialRelease = false;
 
-      systems = [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ];
+      linux64BitSystems = [ "x86_64-linux" "aarch64-linux" ];
+      linuxSystems = linux64BitSystems ++ [ "i686-linux" ];
+      systems = linuxSystems ++ [ "x86_64-darwin" ];
 
       forAllSystems = f: nixpkgs.lib.genAttrs systems (system: f system);
 
@@ -61,30 +63,37 @@
         "LDFLAGS=-fuse-ld=gold"
         ];
 
-      buildDeps =
-        [ bison
-          flex
-          mdbook
-          lowdown
-          autoconf-archive
-          autoreconfHook
-
-          curl
+      nativeBuildDeps =
+        [
+          buildPackages.bison
+          buildPackages.flex
+          (lib.getBin buildPackages.lowdown)
+          buildPackages.mdbook
+          buildPackages.autoconf-archive
+          buildPackages.autoreconfHook
+          buildPackages.pkgconfig
+
+          # Tests
+          buildPackages.git
+          buildPackages.mercurial
+          buildPackages.jq
+        ];
+
+      buildDeps =
+        [ curl
           bzip2 xz brotli zlib editline
-          openssl pkgconfig sqlite
+          openssl sqlite
           libarchive
           boost
           nlohmann_json
-          # Tests
-          git
-          mercurial
-          jq
+          lowdown
           gmock
         ]
         ++ lib.optionals stdenv.isLinux [libseccomp utillinuxMinimal]
-        ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
-        ++ lib.optional (stdenv.isLinux || stdenv.isDarwin)
+        ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium;
+
+      awsDeps = lib.optional (stdenv.isLinux || stdenv.isDarwin)
         (aws-sdk-cpp.override {
           apis = ["s3" "transfer"];
           customMemoryManagement = false;
@@ -108,6 +117,7 @@
 
     nix = with final; with commonDeps pkgs; (stdenv.mkDerivation {
       name = "nix-${version}";
+      inherit version;
 
       src = self;
 
@@ -115,7 +125,8 @@
 
       outputs = [ "out" "dev" "doc" ];
 
-      buildInputs = buildDeps;
+      nativeBuildInputs = nativeBuildDeps;
+      buildInputs = buildDeps ++ awsDeps;
 
       propagatedBuildInputs = propagatedDeps;
 
@@ -159,14 +170,17 @@
 
       src = self;
 
+      nativeBuildInputs =
+        [ buildPackages.autoconf-archive
+          buildPackages.autoreconfHook
+          buildPackages.pkgconfig
+        ];
+
       buildInputs =
-        [ autoconf-archive
-          autoreconfHook
-          nix
+        [ nix
           curl
           bzip2
           xz
-          pkgconfig
           pkgs.perl
           boost
           nlohmann_json
@@ -197,15 +211,15 @@
 
       src = lowdown-src;
 
-      outputs = [ "out" "dev" ];
+      outputs = [ "out" "bin" "dev" ];
 
-      buildInputs = [ which ];
+      nativeBuildInputs = [ which ];
 
       configurePhase =
         ''
           ./configure \
            PREFIX=${placeholder "dev"} \
-           BINDIR=${placeholder "out"}/bin
+           BINDIR=${placeholder "bin"}/bin
         '';
     };
 
@@ -214,10 +228,12 @@
     hydraJobs = {
 
      # Binary package for various platforms.
-      build = nixpkgs.lib.genAttrs systems (system: nixpkgsFor.${system}.nix);
+      build = nixpkgs.lib.genAttrs systems (system: self.packages.${system}.nix);
+
+      buildStatic = nixpkgs.lib.genAttrs linux64BitSystems (system: self.packages.${system}.nix-static);
 
      # Perl bindings for various platforms.
-      perlBindings = nixpkgs.lib.genAttrs systems (system: nixpkgsFor.${system}.nix.perl-bindings);
+      perlBindings = nixpkgs.lib.genAttrs systems (system: self.packages.${system}.nix.perl-bindings);
 
      # Binary tarball for various platforms, containing a Nix store
      # with the closure of 'nix' package, and the second half of
@@ -236,6 +252,7 @@
        }
        ''
          cp ${installerClosureInfo}/registration $TMPDIR/reginfo
+          cp ${./scripts/create-darwin-volume.sh} $TMPDIR/create-darwin-volume.sh
          substitute ${./scripts/install-nix-from-closure.sh} $TMPDIR/install \
            --subst-var-by nix ${nix} \
            --subst-var-by cacert ${cacert}
@@ -254,6 +271,7 @@
          # SC1090: Don't worry about not being able to find
          # $nix/etc/profile.d/nix.sh
          shellcheck --exclude SC1090 $TMPDIR/install
+          shellcheck $TMPDIR/create-darwin-volume.sh
          shellcheck $TMPDIR/install-darwin-multi-user.sh
          shellcheck $TMPDIR/install-systemd-multi-user.sh
 
@@ -269,6 +287,7 @@
          fi
 
          chmod +x $TMPDIR/install
+          chmod +x $TMPDIR/create-darwin-volume.sh
          chmod +x $TMPDIR/install-darwin-multi-user.sh
          chmod +x $TMPDIR/install-systemd-multi-user.sh
          chmod +x $TMPDIR/install-multi-user
@@ -281,11 +300,15 @@
            --absolute-names \
            --hard-dereference \
            --transform "s,$TMPDIR/install,$dir/install," \
+            --transform "s,$TMPDIR/create-darwin-volume.sh,$dir/create-darwin-volume.sh," \
            --transform "s,$TMPDIR/reginfo,$dir/.reginfo," \
            --transform "s,$NIX_STORE,$dir/store,S" \
-            $TMPDIR/install $TMPDIR/install-darwin-multi-user.sh \
+            $TMPDIR/install \
+            $TMPDIR/create-darwin-volume.sh \
+            $TMPDIR/install-darwin-multi-user.sh \
            $TMPDIR/install-systemd-multi-user.sh \
-            $TMPDIR/install-multi-user $TMPDIR/reginfo \
+            $TMPDIR/install-multi-user \
+            $TMPDIR/reginfo \
            $(cat ${installerClosureInfo}/store-paths)
        '');
 
@@ -301,9 +324,26 @@
        ''
          mkdir -p $out/nix-support
 
+          # Converts /nix/store/50p3qk8kka9dl6wyq40vydq945k0j3kv-nix-2.4pre20201102_550e11f/bin/nix
+          # To 50p3qk8kka9dl6wyq40vydq945k0j3kv/bin/nix
+          tarballPath() {
+            # Remove the store prefix
+            local path=''${1#${builtins.storeDir}/}
+            # Get the path relative to the derivation root
+            local rest=''${path#*/}
+            # Get the derivation hash
+            local drvHash=''${path%%-*}
+            echo "$drvHash/$rest"
+          }
+
          substitute ${./scripts/install.in} $out/install \
            ${pkgs.lib.concatMapStrings
-              (system: "--replace '@binaryTarball_${system}@' $(nix --experimental-features nix-command hash-file --base16 --type sha256 ${self.hydraJobs.binaryTarball.${system}}/*.tar.xz) ")
+              (system:
+                '' \
+                --replace '@tarballHash_${system}@' $(nix --experimental-features nix-command hash-file --base16 --type sha256 ${self.hydraJobs.binaryTarball.${system}}/*.tar.xz) \
+                --replace '@tarballPath_${system}@' $(tarballPath ${self.hydraJobs.binaryTarball.${system}}/*.tar.xz) \
+                ''
+              )
              [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ]
            } \
            --replace '@nixVersion@' ${version}
@@ -323,7 +363,8 @@
 
        enableParallelBuilding = true;
 
-        buildInputs = buildDeps ++ propagatedDeps;
+        nativeBuildInputs = nativeBuildDeps;
+        buildInputs = buildDeps ++ propagatedDeps ++ awsDeps;
 
        dontInstall = false;
 
@@ -364,38 +405,6 @@
        inherit (self) overlay;
      });
 
-      # Test whether the binary tarball works in an Ubuntu system.
-      tests.binaryTarball =
-        with nixpkgsFor.x86_64-linux;
-        vmTools.runInLinuxImage (runCommand "nix-binary-tarball-test"
-          { diskImage = vmTools.diskImages.ubuntu1204x86_64;
-          }
-          ''
-            set -x
-            useradd -m alice
-            su - alice -c 'tar xf ${self.hydraJobs.binaryTarball.x86_64-linux}/*.tar.*'
-            mkdir /dest-nix
-            mount -o bind /dest-nix /nix # Provide a writable /nix.
-            chown alice /nix
-            su - alice -c '_NIX_INSTALLER_TEST=1 ./nix-*/install'
-            su - alice -c 'nix-store --verify'
-            su - alice -c 'PAGER= nix-store -qR ${self.hydraJobs.build.x86_64-linux}'
-
-            # Check whether 'nix upgrade-nix' works.
-            cat > /tmp/paths.nix <<EOF
-            {
-              x86_64-linux = "${self.hydraJobs.build.x86_64-linux}";
-            }
-            EOF
-            su - alice -c 'nix --experimental-features nix-command upgrade-nix -vvv --nix-store-paths-url file:///tmp/paths.nix'
-            (! [ -L /home/alice/.profile-1-link ])
-            su - alice -c 'PAGER= nix-store -qR ${self.hydraJobs.build.x86_64-linux}'
-
-            mkdir -p $out/nix-support
-            touch $out/nix-support/hydra-build-products
-            umount /nix
-          '');
-
      /*
      # Check whether we can still evaluate all of Nixpkgs.
      tests.evalNixpkgs =
@@ -429,6 +438,43 @@
 
    packages = forAllSystems (system: {
      inherit (nixpkgsFor.${system}) nix;
|
inherit (nixpkgsFor.${system}) nix;
|
||||||
|
} // nixpkgs.lib.optionalAttrs (builtins.elem system linux64BitSystems) {
|
||||||
|
nix-static = let
|
||||||
|
nixpkgs = nixpkgsFor.${system}.pkgsStatic;
|
||||||
|
in with commonDeps nixpkgs; nixpkgs.stdenv.mkDerivation {
|
||||||
|
name = "nix-${version}";
|
||||||
|
|
||||||
|
src = self;
|
||||||
|
|
||||||
|
VERSION_SUFFIX = versionSuffix;
|
||||||
|
|
||||||
|
outputs = [ "out" "dev" "doc" ];
|
||||||
|
|
||||||
|
nativeBuildInputs = nativeBuildDeps;
|
||||||
|
buildInputs = buildDeps ++ propagatedDeps;
|
||||||
|
|
||||||
|
configureFlags = [ "--sysconfdir=/etc" ];
|
||||||
|
|
||||||
|
enableParallelBuilding = true;
|
||||||
|
|
||||||
|
makeFlags = "profiledir=$(out)/etc/profile.d";
|
||||||
|
|
||||||
|
doCheck = true;
|
||||||
|
|
||||||
|
installFlags = "sysconfdir=$(out)/etc";
|
||||||
|
|
||||||
|
postInstall = ''
|
||||||
|
mkdir -p $doc/nix-support
|
||||||
|
echo "doc manual $doc/share/doc/nix/manual" >> $doc/nix-support/hydra-build-products
|
||||||
|
mkdir -p $out/nix-support
|
||||||
|
echo "file binary-dist $out/bin/nix" >> $out/nix-support/hydra-build-products
|
||||||
|
'';
|
||||||
|
|
||||||
|
doInstallCheck = true;
|
||||||
|
installCheckFlags = "sysconfdir=$(out)/etc";
|
||||||
|
|
||||||
|
stripAllList = ["bin"];
|
||||||
|
};
|
||||||
});
|
});
|
||||||
|
|
||||||
defaultPackage = forAllSystems (system: self.packages.${system}.nix);
|
defaultPackage = forAllSystems (system: self.packages.${system}.nix);
|
||||||
|
@ -442,7 +488,8 @@
|
||||||
|
|
||||||
outputs = [ "out" "dev" "doc" ];
|
outputs = [ "out" "dev" "doc" ];
|
||||||
|
|
||||||
buildInputs = buildDeps ++ propagatedDeps ++ perlDeps;
|
nativeBuildInputs = nativeBuildDeps;
|
||||||
|
buildInputs = buildDeps ++ propagatedDeps ++ awsDeps ++ perlDeps;
|
||||||
|
|
||||||
inherit configureFlags;
|
inherit configureFlags;
|
||||||
|
|
||||||
|
|
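Note on the tarballPath() helper added above: it rewrites an absolute store path like /nix/store/<hash>-<name>/bin/nix into <hash>/bin/nix, the relative path the per-platform tarballs are served under. A minimal C++ sketch of the same rewrite (a hypothetical standalone helper, not part of the diff; it assumes the conventional /nix/store/<hash>-<name>/<rest> layout):

    #include <iostream>
    #include <stdexcept>
    #include <string>

    std::string tarballPath(const std::string & storePath)
    {
        const std::string prefix = "/nix/store/";
        if (storePath.compare(0, prefix.size(), prefix) != 0)
            throw std::invalid_argument("not a store path: " + storePath);
        // e.g. "50p3qk...-nix-2.4pre.../bin/nix" after stripping the prefix
        std::string path = storePath.substr(prefix.size());
        auto slash = path.find('/');            // end of the store entry name
        std::string rest = slash == std::string::npos ? "" : path.substr(slash + 1);
        std::string drvHash = path.substr(0, path.find('-')); // leading base-32 hash
        return drvHash + "/" + rest;
    }

    int main()
    {
        // prints "50p3qk8kka9dl6wyq40vydq945k0j3kv/bin/nix"
        std::cout << tarballPath("/nix/store/50p3qk8kka9dl6wyq40vydq945k0j3kv-nix-2.4pre20201102_550e11f/bin/nix") << "\n";
    }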
local.mk (6 lines changed)

@@ -1,9 +1,3 @@
-ifeq ($(MAKECMDGOALS), dist)
-  dist-files += $(shell cat .dist-files)
-endif
-
-dist-files += configure config.h.in perl/configure
-
 clean-files += Makefile.config

 GLOBAL_CXXFLAGS += -Wno-deprecated-declarations
@@ -2,4 +2,6 @@ ifeq ($(OS), Linux)

 $(foreach n, nix-daemon.socket nix-daemon.service, $(eval $(call install-file-in, $(d)/$(n), $(prefix)/lib/systemd/system, 0644)))

+clean-files += $(d)/nix-daemon.socket $(d)/nix-daemon.service
+
 endif
@@ -2,4 +2,6 @@ ifeq ($(OS), Linux)

 $(foreach n, nix-daemon.conf, $(eval $(call install-file-in, $(d)/$(n), $(sysconfdir)/init, 0644)))

+clean-files += $(d)/nix-daemon.conf
+
 endif
mk/dist.mk (deleted, 17 lines)

@@ -1,17 +0,0 @@
-ifdef PACKAGE_NAME
-
-dist-name = $(PACKAGE_NAME)-$(PACKAGE_VERSION)
-
-dist: $(dist-name).tar.bz2 $(dist-name).tar.xz
-
-$(dist-name).tar.bz2: $(dist-files)
-	$(trace-gen) tar cfj $@ $(sort $(dist-files)) --transform 's,^,$(dist-name)/,'
-
-$(dist-name).tar.xz: $(dist-files)
-	$(trace-gen) tar cfJ $@ $(sort $(dist-files)) --transform 's,^,$(dist-name)/,'
-
-clean-files += $(dist-name).tar.bz2 $(dist-name).tar.xz
-
-print-top-help += echo "  dist: Generate a source distribution";
-
-endif
@@ -10,7 +10,6 @@ bin-scripts :=
 noinst-scripts :=
 man-pages :=
 install-tests :=
-dist-files :=

 OS = $(shell uname -s)

@@ -112,9 +111,6 @@ $(foreach test, $(install-tests), $(eval $(call run-install-test,$(test))))
 $(foreach file, $(man-pages), $(eval $(call install-data-in, $(file), $(mandir)/man$(patsubst .%,%,$(suffix $(file))))))

-include mk/dist.mk
-
 .PHONY: default all man help

 all: $(programs-list) $(libs-list) $(jars-list) $(man-pages)
@@ -159,5 +159,4 @@ define build-library
   libs-list += $$($(1)_PATH)
   endif
   clean-files += $$(_d)/*.a $$(_d)/*.$(SO_EXT) $$(_d)/*.o $$(_d)/.*.dep $$($(1)_DEPS) $$($(1)_OBJS)
-  dist-files += $$(_srcs)
 endef
@@ -79,7 +79,6 @@ define build-program

   programs-list += $$($(1)_PATH)
   clean-files += $$($(1)_PATH) $$(_d)/*.o $$(_d)/.*.dep $$($(1)_DEPS) $$($(1)_OBJS)
-  dist-files += $$(_srcs)

   # Phony target to run this program (typically as a dependency of 'check').
   .PHONY: $(1)_RUN
@@ -30,8 +30,6 @@ ifeq ($(OS), Darwin)
 	install_name_tool -id $@ $@
 endif

-dist-files += $(d)/vendor
-
 clean: clean-rust

 clean-rust:
@@ -110,10 +110,14 @@ SV * queryPathInfo(char * path, int base32)
             XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0)));
         mXPUSHi(info->registrationTime);
         mXPUSHi(info->narSize);
-        AV * arr = newAV();
+        AV * refs = newAV();
         for (auto & i : info->references)
-            av_push(arr, newSVpv(store()->printStorePath(i).c_str(), 0));
-        XPUSHs(sv_2mortal(newRV((SV *) arr)));
+            av_push(refs, newSVpv(store()->printStorePath(i).c_str(), 0));
+        XPUSHs(sv_2mortal(newRV((SV *) refs)));
+        AV * sigs = newAV();
+        for (auto & i : info->sigs)
+            av_push(sigs, newSVpv(i.c_str(), 0));
+        XPUSHs(sv_2mortal(newRV((SV *) sigs)));
     } catch (Error & e) {
         croak("%s", e.what());
     }
@@ -5,42 +5,13 @@ root_disk() {
     diskutil info -plist /
 }

-apfs_volumes_for() {
-    disk=$1
-    diskutil apfs list -plist "$disk"
-}
-
-disk_identifier() {
-    xpath "/plist/dict/key[text()='ParentWholeDisk']/following-sibling::string[1]/text()" 2>/dev/null
-}
-
-volume_list_true() {
-    key=$1
-    xpath "/plist/dict/array/dict/key[text()='Volumes']/following-sibling::array/dict/key[text()='$key']/following-sibling::true[1]" 2> /dev/null
-}
-
-volume_get_string() {
-    key=$1 i=$2
-    xpath "/plist/dict/array/dict/key[text()='Volumes']/following-sibling::array/dict[$i]/key[text()='$key']/following-sibling::string[1]/text()" 2> /dev/null
-}
+# i.e., "disk1"
+root_disk_identifier() {
+    diskutil info -plist / | xmllint --xpath "/plist/dict/key[text()='ParentWholeDisk']/following-sibling::string[1]/text()" -
+}

 find_nix_volume() {
-    disk=$1
-    i=1
-    volumes=$(apfs_volumes_for "$disk")
-    while true; do
-        name=$(echo "$volumes" | volume_get_string "Name" "$i")
-        if [ -z "$name" ]; then
-            break
-        fi
-        case "$name" in
-            [Nn]ix*)
-                echo "$name"
-                break
-                ;;
-        esac
-        i=$((i+1))
-    done
+    diskutil apfs list -plist "$1" | xmllint --xpath "(/plist/dict/array/dict/key[text()='Volumes']/following-sibling::array/dict/key[text()='Name']/following-sibling::string[starts-with(translate(text(),'N','n'),'nix')]/text())[1]" - 2>/dev/null || true
 }

 test_fstab() {

@@ -55,6 +26,20 @@ test_synthetic_conf() {
     grep -q "^nix$" /etc/synthetic.conf 2>/dev/null
 }

+# Create the paths defined in synthetic.conf, saving us a reboot.
+create_synthetic_objects(){
+    # Big Sur takes away the -B flag we were using and replaces it
+    # with a -t flag that appears to do the same thing (but they
+    # don't behave exactly the same way in terms of return values).
+    # This feels a little dirty, but as far as I can tell the
+    # simplest way to get the right one is to just throw away stderr
+    # and call both... :]
+    {
+        /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -t || true # Big Sur
+        /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -B || true # Catalina
+    } >/dev/null 2>&1
+}
+
 test_nix() {
     test -d "/nix"
 }

@@ -89,9 +74,7 @@ test_t2_chip_present(){
 }

 test_filevault_in_use() {
-    disk=$1
-    # list vols on disk | get value of Filevault key | value is true
-    apfs_volumes_for "$disk" | volume_list_true FileVault | grep -q true
+    fdesetup isactive >/dev/null
 }

 # use after error msg for conditions we don't understand

@@ -132,7 +115,7 @@ main() {

     if ! test_nix; then
         echo "Creating mountpoint for /nix..." >&2
-        /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -B || true
+        create_synthetic_objects # the ones we defined in synthetic.conf
        if ! test_nix; then
            sudo mkdir -p /nix 2>/dev/null || true
        fi

@@ -143,12 +126,12 @@ main() {
         fi
     fi

-    disk=$(root_disk | disk_identifier)
+    disk="$(root_disk_identifier)"
     volume=$(find_nix_volume "$disk")
     if [ -z "$volume" ]; then
         echo "Creating a Nix Store volume..." >&2

-        if test_filevault_in_use "$disk"; then
+        if test_filevault_in_use; then
             # TODO: Not sure if it's in-scope now, but `diskutil apfs list`
             # shows both filevault and encrypted at rest status, and it
             # may be the more semantic way to test for this? It'll show

@@ -178,6 +161,7 @@ main() {
     if ! test_fstab; then
         echo "Configuring /etc/fstab..." >&2
         label=$(echo "$volume" | sed 's/ /\\040/g')
+        # shellcheck disable=SC2209
         printf "\$a\nLABEL=%s /nix apfs rw,nobrowse\n.\nwq\n" "$label" | EDITOR=ed sudo vifs
     fi
 }
@@ -37,6 +37,13 @@ poly_service_setup_note() {
 EOF
 }

+poly_extra_try_me_commands(){
+    :
+}
+poly_extra_setup_instructions(){
+    :
+}
+
 poly_configure_nix_daemon_service() {
     _sudo "to set up the nix-daemon as a LaunchDaemon" \
           cp -f "/nix/var/nix/profiles/default$PLIST_DEST" "$PLIST_DEST"
@@ -71,11 +71,9 @@ uninstall_directions() {
     subheader "Uninstalling nix:"
     local step=0

-    if [ -e /run/systemd/system ] && poly_service_installed_check; then
+    if poly_service_installed_check; then
         step=$((step + 1))
         poly_service_uninstall_directions "$step"
-    else
-        step=$((step + 1))
     fi

     for profile_target in "${PROFILE_TARGETS[@]}"; do

@@ -255,40 +253,20 @@ function finish_success {
         echo "To try again later, run \"sudo -i nix-channel --update nixpkgs\"."
     fi

-    if [ -e /run/systemd/system ]; then
-        cat <<EOF
+    cat <<EOF

 Before Nix will work in your existing shells, you'll need to close
 them and open them again. Other than that, you should be ready to go.

 Try it! Open a new terminal, and type:
+$(poly_extra_try_me_commands)
   $ nix-shell -p nix-info --run "nix-info -m"
+$(poly_extra_setup_instructions)
 Thank you for using this installer. If you have any feedback, don't
 hesitate:

 $(contactme)
 EOF
-    else
-        cat <<EOF
-
-Before Nix will work in your existing shells, you'll need to close
-them and open them again. Other than that, you should be ready to go.
-
-Try it! Open a new terminal, and type:
-
-  $ sudo nix-daemon
-  $ nix-shell -p nix-info --run "nix-info -m"
-
-Additionally, you may want to add nix-daemon to your init-system.
-
-Thank you for using this installer. If you have any feedback, don't
-hesitate:
-
-$(contactme)
-EOF
-    fi
-
 }

@@ -630,24 +608,20 @@ EOF
 }

 configure_shell_profile() {
-    # If there is an /etc/profile.d directory, we want to ensure there
-    # is a nix.sh within it, so we can use the following loop to add
-    # the source lines to it. Note that I'm _not_ adding the source
-    # lines here, because we want to be using the regular machinery.
-    #
-    # If we go around that machinery, it becomes more complicated and
-    # adds complications to the uninstall instruction generator and
-    # old instruction sniffer as well.
-    if [ -d /etc/profile.d ]; then
-        _sudo "create a stub /etc/profile.d/nix.sh which will be updated" \
-              touch /etc/profile.d/nix.sh
-    fi
-
     for profile_target in "${PROFILE_TARGETS[@]}"; do
         if [ -e "$profile_target" ]; then
             _sudo "to back up your current $profile_target to $profile_target$PROFILE_BACKUP_SUFFIX" \
                   cp "$profile_target" "$profile_target$PROFILE_BACKUP_SUFFIX"
+        else
+            # try to create the file if its directory exists
+            target_dir="$(dirname "$profile_target")"
+            if [ -d "$target_dir" ]; then
+                _sudo "to create a stub $profile_target which will be updated" \
+                      touch "$profile_target"
+            fi
+        fi
+
+        if [ -e "$profile_target" ]; then
             shell_source_lines \
                 | _sudo "extend your $profile_target with nix-daemon settings" \
                        tee -a "$profile_target"

@@ -725,9 +699,7 @@ main() {
     setup_default_profile
     place_nix_configuration

-    if [ -e /run/systemd/system ]; then
-        poly_configure_nix_daemon_service
-    fi
+    poly_configure_nix_daemon_service

     trap finish_success EXIT
 }
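The poly_* functions above behave like overridable hooks: the shared install-multi-user.sh calls them, and each platform script supplies its own definition (a no-op where systemd handles things, extra instructions elsewhere). A rough C++ analogy of that dispatch, for illustration only (the Installer types here are hypothetical, not anything in the installer):

    #include <iostream>

    struct Installer {
        virtual ~Installer() = default;
        // Hook with a default no-op body, like poly_extra_try_me_commands(){ :; }
        virtual void extraTryMeCommands() {}
        void finishSuccess() {
            std::cout << "Try it! Open a new terminal, and type:\n";
            extraTryMeCommands(); // platform-specific lines, if any
            std::cout << "  $ nix-shell -p nix-info --run \"nix-info -m\"\n";
        }
    };

    struct NonSystemdInstaller : Installer {
        void extraTryMeCommands() override {
            std::cout << "  $ sudo nix-daemon\n"; // start the daemon by hand
        }
    };

    int main()
    {
        NonSystemdInstaller inst;
        inst.finishSuccess();
    }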
@@ -26,9 +26,20 @@ fi

 # macOS support for 10.12.6 or higher
 if [ "$(uname -s)" = "Darwin" ]; then
-    macos_major=$(sw_vers -productVersion | cut -d '.' -f 2)
-    macos_minor=$(sw_vers -productVersion | cut -d '.' -f 3)
-    if [ "$macos_major" -lt 12 ] || { [ "$macos_major" -eq 12 ] && [ "$macos_minor" -lt 6 ]; }; then
+    IFS='.' read macos_major macos_minor macos_patch << EOF
+$(sw_vers -productVersion)
+EOF
+    # TODO: this is a temporary speed-bump to keep people from naively installing Nix
+    # on macOS Big Sur (11.0+, 10.16+) until nixpkgs updates are ready for them.
+    # *Ideally* this is gone before next Nix release. If you're intentionally working on
+    # Nix + Big Sur, just comment out this block and be on your way :)
+    if [ "$macos_major" -gt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -gt 15 ]; }; then
+        echo "$0: nixpkgs isn't quite ready to support macOS $(sw_vers -productVersion) yet"
+        exit 1
+    fi
+
+    if [ "$macos_major" -lt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -lt 12 ]; } || { [ "$macos_minor" -eq 12 ] && [ "$macos_patch" -lt 6 ]; }; then
+        # patch may not be present; command substitution for simplicity
         echo "$0: macOS $(sw_vers -productVersion) is not supported, upgrade to 10.12.6 or higher"
         exit 1
     fi

@@ -87,10 +98,13 @@ while [ $# -gt 0 ]; do
             echo ""
             echo " --nix-extra-conf-file: Path to nix.conf to prepend when installing /etc/nix.conf"
             echo ""
+            if [ -n "${INVOKED_FROM_INSTALL_IN:-}" ]; then
+                echo " --tarball-url-prefix URL: Base URL to download the Nix tarball from."
+            fi
         ) >&2

         # darwin and Catalina+
-        if [ "$(uname -s)" = "Darwin" ] && [ "$macos_major" -gt 14 ]; then
+        if [ "$(uname -s)" = "Darwin" ] && { [ "$macos_major" -gt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -gt 14 ]; }; }; then
             (
                 echo " --darwin-use-unencrypted-nix-store-volume: Create an APFS volume for the Nix"
                 echo "   store and mount it at /nix. This is the recommended way to create"

@@ -110,8 +124,8 @@ if [ "$(uname -s)" = "Darwin" ]; then
         "$self/create-darwin-volume.sh"
     fi

-    info=$(diskutil info -plist / | xpath "/plist/dict/key[text()='Writable']/following-sibling::true[1]" 2> /dev/null)
-    if ! [ -e $dest ] && [ -n "$info" ] && [ "$macos_major" -gt 14 ]; then
+    writable="$(diskutil info -plist / | xmllint --xpath "name(/plist/dict/key[text()='Writable']/following-sibling::*[1])" -)"
+    if ! [ -e $dest ] && [ "$writable" = "false" ]; then
         (
             echo ""
             echo "Installing on macOS >=10.15 requires relocating the store to an apfs volume."
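The version check above compares dotted components numerically rather than as strings, which is what the chained -gt/-eq/-lt tests implement. The same logic as a C++ sketch (a hypothetical helper; assumes "major.minor[.patch]" input, and a missing patch component reads as 0):

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <tuple>

    std::tuple<int, int, int> parseVersion(const std::string & s)
    {
        int major = 0, minor = 0, patch = 0;
        char dot;
        std::istringstream in(s);
        in >> major >> dot >> minor; // "10.15" parses; patch stays 0
        in >> dot >> patch;          // optional third component
        return {major, minor, patch};
    }

    int main()
    {
        auto v = parseVersion("10.15.7");
        // tuples compare lexicographically, so this mirrors the shell tests
        std::cout << (v < std::make_tuple(10, 12, 6) ? "too old\n" : "supported\n");
        std::cout << (v > std::make_tuple(10, 15, 99) ? "Big Sur speed-bump\n" : "ok\n");
    }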
@@ -72,7 +72,28 @@ poly_service_setup_note() {
 EOF
 }

+poly_extra_try_me_commands(){
+    if [ -e /run/systemd/system ]; then
+        :
+    else
+        cat <<EOF
+  $ sudo nix-daemon
+EOF
+    fi
+}
+poly_extra_setup_instructions(){
+    if [ -e /run/systemd/system ]; then
+        :
+    else
+        cat <<EOF
+Additionally, you may want to add nix-daemon to your init-system.
+
+EOF
+    fi
+}
+
 poly_configure_nix_daemon_service() {
+    if [ -e /run/systemd/system ]; then
         _sudo "to set up the nix-daemon service" \
               systemctl link "/nix/var/nix/profiles/default$SERVICE_SRC"

@@ -89,7 +110,7 @@ poly_configure_nix_daemon_service() {

         _sudo "to start the nix-daemon.service" \
               systemctl restart nix-daemon.service
+    fi
 }

 poly_group_exists() {
scripts/install.in (49 lines changed; mode changed from normal file to executable)

@@ -25,16 +25,52 @@ require_util() {
 }

 case "$(uname -s).$(uname -m)" in
-    Linux.x86_64) system=x86_64-linux; hash=@binaryTarball_x86_64-linux@;;
-    Linux.i?86) system=i686-linux; hash=@binaryTarball_i686-linux@;;
-    Linux.aarch64) system=aarch64-linux; hash=@binaryTarball_aarch64-linux@;;
-    Darwin.x86_64) system=x86_64-darwin; hash=@binaryTarball_x86_64-darwin@;;
+    Linux.x86_64)
+        hash=@tarballHash_x86_64-linux@
+        path=@tarballPath_x86_64-linux@
+        system=x86_64-linux
+        ;;
+    Linux.i?86)
+        hash=@tarballHash_i686-linux@
+        path=@tarballPath_i686-linux@
+        system=i686-linux
+        ;;
+    Linux.aarch64)
+        hash=@tarballHash_aarch64-linux@
+        path=@tarballPath_aarch64-linux@
+        system=aarch64-linux
+        ;;
+    Darwin.x86_64)
+        hash=@tarballHash_x86_64-darwin@
+        path=@tarballPath_x86_64-darwin@
+        system=x86_64-darwin
+        ;;
+    Darwin.arm64|Darwin.aarch64)
+        # check for Rosetta 2 support
+        if ! [ -f /Library/Apple/System/Library/LaunchDaemons/com.apple.oahd.plist ]; then
+            oops "Rosetta 2 is not installed on this ARM64 macOS machine. Run softwareupdate --install-rosetta then restart installation"
+        fi
+
+        hash=@tarballHash_x86_64-darwin@
+        path=@tarballPath_x86_64-darwin@
+        # eventually maybe: aarch64-darwin
+        system=x86_64-darwin
+        ;;
     *) oops "sorry, there is no binary distribution of Nix for your platform";;
 esac

-url="https://releases.nixos.org/nix/nix-@nixVersion@/nix-@nixVersion@-$system.tar.xz"
+# Use this command-line option to fetch the tarballs using nar-serve or Cachix
+if [ "${1:-}" = "--tarball-url-prefix" ]; then
+    if [ -z "${2:-}" ]; then
+        oops "missing argument for --tarball-url-prefix"
+    fi
+    url=${2}/${path}
+    shift 2
+else
+    url=https://releases.nixos.org/nix/nix-@nixVersion@/nix-@nixVersion@-$system.tar.xz
+fi

-tarball="$tmpDir/$(basename "$tmpDir/nix-@nixVersion@-$system.tar.xz")"
+tarball=$tmpDir/nix-@nixVersion@-$system.tar.xz

 require_util curl "download the binary tarball"
 require_util tar "unpack the binary tarball"

@@ -66,6 +102,7 @@ tar -xJf "$tarball" -C "$unpack" || oops "failed to unpack '$url'"
 script=$(echo "$unpack"/*/install)

 [ -e "$script" ] || oops "installation script is missing from the binary tarball!"
+export INVOKED_FROM_INSTALL_IN=1
 "$script" "$@"

 } # End of wrapping
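The enlarged case statement above is effectively a lookup from the uname pair to a (hash, path, system) triple. A table-driven C++ sketch of that dispatch (the Tarball struct is hypothetical; the @...@ tokens are the same substitution placeholders the script uses):

    #include <iostream>
    #include <map>
    #include <string>

    struct Tarball { std::string hash, path, system; };

    int main()
    {
        const std::map<std::string, Tarball> tarballs = {
            {"Linux.x86_64",  {"@tarballHash_x86_64-linux@",  "@tarballPath_x86_64-linux@",  "x86_64-linux"}},
            {"Linux.aarch64", {"@tarballHash_aarch64-linux@", "@tarballPath_aarch64-linux@", "aarch64-linux"}},
            {"Darwin.x86_64", {"@tarballHash_x86_64-darwin@", "@tarballPath_x86_64-darwin@", "x86_64-darwin"}},
        };

        std::string key = "Linux.x86_64"; // "$(uname -s).$(uname -m)"
        auto it = tarballs.find(key);
        if (it == tarballs.end()) {
            std::cerr << "sorry, there is no binary distribution of Nix for your platform\n";
            return 1;
        }
        // default URL; with --tarball-url-prefix it would be prefix + "/" + path
        std::cout << "https://releases.nixos.org/nix/nix-@nixVersion@/nix-@nixVersion@-"
                  << it->second.system << ".tar.xz\n";
    }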
@@ -17,11 +17,21 @@ elif [ -e /etc/pki/tls/certs/ca-bundle.crt ]; then # Fedora, CentOS
     export NIX_SSL_CERT_FILE=/etc/pki/tls/certs/ca-bundle.crt
 else
     # Fall back to what is in the nix profiles, favouring whatever is defined last.
+    check_nix_profiles() {
+        if [ "$ZSH_VERSION" ]; then
+            # Zsh by default doesn't split words in unquoted parameter expansion.
+            # Set local_options for these options to be reverted at the end of the function
+            # and shwordsplit to force splitting words in $NIX_PROFILES below.
+            setopt local_options shwordsplit
+        fi
         for i in $NIX_PROFILES; do
             if [ -e $i/etc/ssl/certs/ca-bundle.crt ]; then
                 export NIX_SSL_CERT_FILE=$i/etc/ssl/certs/ca-bundle.crt
             fi
         done
+    }
+    check_nix_profiles
+    unset -f check_nix_profiles
 fi

 export PATH="$HOME/.nix-profile/bin:@localstatedir@/nix/profiles/default/bin:$PATH"
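The zsh guard above exists because the loop relies on word splitting of the unquoted $NIX_PROFILES. The splitting itself is simple; a C++ equivalent for illustration (splitWords is a hypothetical helper, not part of the scripts):

    #include <cstdlib>
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    std::vector<std::string> splitWords(const std::string & s)
    {
        std::vector<std::string> words;
        std::istringstream in(s);
        for (std::string w; in >> w; )
            words.push_back(w); // istringstream splits on any whitespace
        return words;
    }

    int main()
    {
        const char * profiles = std::getenv("NIX_PROFILES");
        for (auto & p : splitWords(profiles ? profiles : ""))
            std::cout << p << "\n";
    }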
@@ -201,7 +201,7 @@ static int main_build_remote(int argc, char * * argv)
                 % concatStringsSep<StringSet>(", ", m.mandatoryFeatures);
         }

-        logErrorInfo(lvlInfo, {
+        logErrorInfo(canBuildLocally ? lvlChatty : lvlWarn, {
             .name = "Remote build",
             .description = "Failed to find a machine for remote build!",
             .hint = hint
@@ -525,8 +525,17 @@ string_t AttrCursor::getStringWithContext()
         cachedValue = root->db->getAttr(getKey(), root->state.symbols);
         if (cachedValue && !std::get_if<placeholder_t>(&cachedValue->second)) {
             if (auto s = std::get_if<string_t>(&cachedValue->second)) {
+                bool valid = true;
+                for (auto & c : s->second) {
+                    if (!root->state.store->isValidPath(root->state.store->parseStorePath(c.first))) {
+                        valid = false;
+                        break;
+                    }
+                }
+                if (valid) {
                     debug("using cached string attribute '%s'", getAttrPathStr());
                     return *s;
+                }
             } else
                 throw TypeError("'%s' is not a string", getAttrPathStr());
         }
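The point of the new loop: a cached string attribute may carry context (store paths it depends on), and if any of those paths has since been garbage collected, the cache entry must not be used. The check is "all context paths are still valid"; a behavior-equivalent standalone sketch with a stubbed store (validPaths and isValidPath are stand-ins for the real Store API):

    #include <algorithm>
    #include <iostream>
    #include <set>
    #include <string>
    #include <utility>
    #include <vector>

    static const std::set<std::string> validPaths = {
        "/nix/store/aaaa-dep",
    };

    bool isValidPath(const std::string & p) { return validPaths.count(p) > 0; }

    int main()
    {
        // context of the cached string: (store path, output) pairs
        std::vector<std::pair<std::string, std::string>> context = {
            {"/nix/store/aaaa-dep", "out"},
            {"/nix/store/bbbb-gone", "out"}, // garbage-collected since caching
        };

        bool valid = std::all_of(context.begin(), context.end(),
            [](auto & c) { return isValidPath(c.first); });

        // invalid => fall through to re-evaluation instead of a stale cache hit
        std::cout << (valid ? "use cached string" : "re-evaluate") << "\n";
    }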
@@ -27,6 +27,10 @@
 #include <gc/gc.h>
 #include <gc/gc_cpp.h>

+#include <boost/coroutine2/coroutine.hpp>
+#include <boost/coroutine2/protected_fixedsize_stack.hpp>
+#include <boost/context/stack_context.hpp>
+
 #endif

 namespace nix {

@@ -208,7 +212,8 @@ bool Value::isTrivial() const
         && (type != tThunk
             || (dynamic_cast<ExprAttrs *>(thunk.expr)
                 && ((ExprAttrs *) thunk.expr)->dynamicAttrs.empty())
-            || dynamic_cast<ExprLambda *>(thunk.expr));
+            || dynamic_cast<ExprLambda *>(thunk.expr)
+            || dynamic_cast<ExprList *>(thunk.expr));
 }

@@ -219,6 +224,31 @@ static void * oomHandler(size_t requested)
     /* Convert this to a proper C++ exception. */
     throw std::bad_alloc();
 }

+class BoehmGCStackAllocator : public StackAllocator {
+    boost::coroutines2::protected_fixedsize_stack stack {
+        // We allocate 8 MB, the default max stack size on NixOS.
+        // A smaller stack might be quicker to allocate but reduces the stack
+        // depth available for source filter expressions etc.
+        std::max(boost::context::stack_traits::default_size(), static_cast<std::size_t>(8 * 1024 * 1024))
+    };
+
+public:
+    boost::context::stack_context allocate() override {
+        auto sctx = stack.allocate();
+        GC_add_roots(static_cast<char *>(sctx.sp) - sctx.size, sctx.sp);
+        return sctx;
+    }
+
+    void deallocate(boost::context::stack_context sctx) override {
+        GC_remove_roots(static_cast<char *>(sctx.sp) - sctx.size, sctx.sp);
+        stack.deallocate(sctx);
+    }
+};
+
+static BoehmGCStackAllocator boehmGCStackAllocator;
+
 #endif

@@ -256,6 +286,8 @@ void initGC()

     GC_set_oom_fn(oomHandler);

+    StackAllocator::defaultAllocator = &boehmGCStackAllocator;
+
     /* Set the initial heap size to something fairly big (25% of
        physical RAM, up to a maximum of 384 MiB) so that in most cases
        we don't need to garbage collect at all.  (Collection has a

@@ -1404,7 +1436,7 @@ void ExprAssert::eval(EvalState & state, Env & env, Value & v)
     if (!state.evalBool(env, cond, pos)) {
         std::ostringstream out;
         cond->show(out);
-        throwAssertionError(pos, "assertion '%1%' failed at %2%", out.str());
+        throwAssertionError(pos, "assertion '%1%' failed", out.str());
     }
     body->eval(state, env, v);
 }

@@ -2072,10 +2104,19 @@ EvalSettings::EvalSettings()
 Strings EvalSettings::getDefaultNixPath()
 {
     Strings res;
-    auto add = [&](const Path & p) { if (pathExists(p)) { res.push_back(p); } };
+    auto add = [&](const Path & p, const std::string & s = std::string()) {
+        if (pathExists(p)) {
+            if (s.empty()) {
+                res.push_back(p);
+            } else {
+                res.push_back(s + "=" + p);
+            }
+        }
+    };
+
     add(getHome() + "/.nix-defexpr/channels");
-    add("nixpkgs=" + settings.nixStateDir + "/nix/profiles/per-user/root/channels/nixpkgs");
-    add(settings.nixStateDir + "/nix/profiles/per-user/root/channels");
+    add(settings.nixStateDir + "/profiles/per-user/root/channels/nixpkgs", "nixpkgs");
+    add(settings.nixStateDir + "/profiles/per-user/root/channels");
     return res;
 }
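Background for the BoehmGCStackAllocator above: evaluation now runs some work on Boost.Coroutine2 coroutines, whose stacks the Boehm GC does not scan by default; the allocator registers each coroutine stack as a GC root on allocate() and unregisters it on deallocate(). A minimal, GC-free sketch of the underlying hook, i.e. how Boost.Coroutine2 accepts a custom stack allocator (link with -lboost_context, as the local.mk change below also does; the GC_add_roots/GC_remove_roots calls are omitted here):

    #include <boost/coroutine2/coroutine.hpp>
    #include <boost/coroutine2/protected_fixedsize_stack.hpp>
    #include <iostream>
    #include <memory>

    int main()
    {
        using coro_t = boost::coroutines2::coroutine<int>;

        // 8 MiB, matching the default max stack size the diff's comment mentions
        boost::coroutines2::protected_fixedsize_stack stack(8 * 1024 * 1024);

        coro_t::pull_type numbers(
            std::allocator_arg, stack,
            [](coro_t::push_type & yield) {
                for (int i = 0; i < 3; ++i)
                    yield(i); // values produced on the coroutine's own stack
            });

        for (int n : numbers)
            std::cout << n << "\n";
    }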
src/libexpr/flake/config.cc (new file, 81 lines)

@@ -0,0 +1,81 @@
+#include "flake.hh"
+
+#include <nlohmann/json.hpp>
+
+namespace nix::flake {
+
+// setting name -> setting value -> allow or ignore.
+typedef std::map<std::string, std::map<std::string, bool>> TrustedList;
+
+Path trustedListPath()
+{
+    return getDataDir() + "/nix/trusted-settings.json";
+}
+
+static TrustedList readTrustedList()
+{
+    auto path = trustedListPath();
+    if (!pathExists(path)) return {};
+    auto json = nlohmann::json::parse(readFile(path));
+    return json;
+}
+
+static void writeTrustedList(const TrustedList & trustedList)
+{
+    writeFile(trustedListPath(), nlohmann::json(trustedList).dump());
+}
+
+void ConfigFile::apply()
+{
+    std::set<std::string> whitelist{"bash-prompt", "bash-prompt-suffix"};
+
+    for (auto & [name, value] : settings) {
+
+        auto baseName = hasPrefix(name, "extra-") ? std::string(name, 6) : name;
+
+        // FIXME: Move into libutil/config.cc.
+        std::string valueS;
+        if (auto s = std::get_if<std::string>(&value))
+            valueS = *s;
+        else if (auto n = std::get_if<int64_t>(&value))
+            valueS = fmt("%d", *n);
+        else if (auto b = std::get_if<Explicit<bool>>(&value))
+            valueS = b->t ? "true" : "false";
+        else if (auto ss = std::get_if<std::vector<std::string>>(&value))
+            valueS = concatStringsSep(" ", *ss); // FIXME: evil
+        else
+            assert(false);
+
+        if (!whitelist.count(baseName)) {
+            auto trustedList = readTrustedList();
+
+            bool trusted = false;
+
+            if (auto saved = get(get(trustedList, name).value_or(std::map<std::string, bool>()), valueS)) {
+                trusted = *saved;
+            } else {
+                // FIXME: filter ANSI escapes, newlines, \r, etc.
+                if (std::tolower(logger->ask(fmt("do you want to allow configuration setting '%s' to be set to '" ANSI_RED "%s" ANSI_NORMAL "' (y/N)?", name, valueS)).value_or('n')) != 'y') {
+                    if (std::tolower(logger->ask("do you want to permanently mark this value as untrusted (y/N)?").value_or('n')) == 'y') {
+                        trustedList[name][valueS] = false;
+                        writeTrustedList(trustedList);
+                    }
+                } else {
+                    if (std::tolower(logger->ask("do you want to permanently mark this value as trusted (y/N)?").value_or('n')) == 'y') {
+                        trustedList[name][valueS] = trusted = true;
+                        writeTrustedList(trustedList);
+                    }
+                }
+            }
+
+            if (!trusted) {
+                warn("ignoring untrusted flake configuration setting '%s'", name);
+                continue;
+            }
+        }
+
+        globalConfig.set(name, valueS);
+    }
+}
+
+}
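The trusted-settings file that config.cc reads and writes is just the TrustedList map serialized as JSON. nlohmann::json converts nested std::map types implicitly, so the round trip is short; a self-contained sketch (the example setting values are made up):

    #include <nlohmann/json.hpp>
    #include <iostream>
    #include <map>
    #include <string>

    typedef std::map<std::string, std::map<std::string, bool>> TrustedList;

    int main()
    {
        TrustedList tl;
        tl["bash-prompt"]["(nix) "] = true;                 // remembered as trusted
        tl["substituters"]["https://example.org"] = false;  // remembered as untrusted

        std::string onDisk = nlohmann::json(tl).dump();     // what writeTrustedList stores
        auto roundTripped = nlohmann::json::parse(onDisk).get<TrustedList>();

        std::cout << onDisk << "\n";
        std::cout << roundTripped["bash-prompt"]["(nix) "] << "\n"; // 1
    }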
@@ -71,11 +71,17 @@ static std::tuple<fetchers::Tree, FlakeRef, FlakeRef> fetchOrSubstituteTree(
     return {std::move(tree), resolvedRef, lockedRef};
 }

-static void expectType(EvalState & state, ValueType type,
-    Value & value, const Pos & pos)
+static void forceTrivialValue(EvalState & state, Value & value, const Pos & pos)
 {
     if (value.type == tThunk && value.isTrivial())
         state.forceValue(value, pos);
+}
+
+static void expectType(EvalState & state, ValueType type,
+    Value & value, const Pos & pos)
+{
+    forceTrivialValue(state, value, pos);
     if (value.type != type)
         throw Error("expected %s but got %s at %s",
             showType(type), showType(value.type), pos);

@@ -114,7 +120,6 @@ static FlakeInput parseFlakeInput(EvalState & state,
             expectType(state, tString, *attr.value, *attr.pos);
             input.follows = parseInputPath(attr.value->string.s);
         } else {
-            state.forceValue(*attr.value);
             if (attr.value->type == tString)
                 attrs.emplace(attr.name, attr.value->string.s);
             else

@@ -196,11 +201,6 @@ static Flake getFlake(

     expectType(state, tAttrs, vInfo, Pos(foFile, state.symbols.create(flakeFile), 0, 0));

-    auto sEdition = state.symbols.create("edition"); // FIXME: remove soon
-
-    if (vInfo.attrs->get(sEdition))
-        warn("flake '%s' has deprecated attribute 'edition'", lockedRef);
-
     if (auto description = vInfo.attrs->get(state.sDescription)) {
         expectType(state, tString, *description->value, *description->pos);
         flake.description = description->value->string.s;

@@ -228,11 +228,41 @@ static Flake getFlake(
     } else
         throw Error("flake '%s' lacks attribute 'outputs'", lockedRef);

+    auto sNixConfig = state.symbols.create("nixConfig");
+
+    if (auto nixConfig = vInfo.attrs->get(sNixConfig)) {
+        expectType(state, tAttrs, *nixConfig->value, *nixConfig->pos);
+
+        for (auto & setting : *nixConfig->value->attrs) {
+            forceTrivialValue(state, *setting.value, *setting.pos);
+            if (setting.value->type == tString)
+                flake.config.settings.insert({setting.name, state.forceStringNoCtx(*setting.value, *setting.pos)});
+            else if (setting.value->type == tInt)
+                flake.config.settings.insert({setting.name, state.forceInt(*setting.value, *setting.pos)});
+            else if (setting.value->type == tBool)
+                flake.config.settings.insert({setting.name, state.forceBool(*setting.value, *setting.pos)});
+            else if (setting.value->isList()) {
+                std::vector<std::string> ss;
+                for (unsigned int n = 0; n < setting.value->listSize(); ++n) {
+                    auto elem = setting.value->listElems()[n];
+                    if (elem->type != tString)
+                        throw TypeError("list element in flake configuration setting '%s' is %s while a string is expected",
+                            setting.name, showType(*setting.value));
+                    ss.push_back(state.forceStringNoCtx(*elem, *setting.pos));
+                }
+                flake.config.settings.insert({setting.name, ss});
+            }
+            else
+                throw TypeError("flake configuration setting '%s' is %s",
+                    setting.name, showType(*setting.value));
+        }
+    }
+
     for (auto & attr : *vInfo.attrs) {
-        if (attr.name != sEdition &&
-            attr.name != state.sDescription &&
+        if (attr.name != state.sDescription &&
             attr.name != sInputs &&
-            attr.name != sOutputs)
+            attr.name != sOutputs &&
+            attr.name != sNixConfig)
             throw Error("flake '%s' has an unsupported attribute '%s', at %s",
                 lockedRef, attr.name, *attr.pos);
     }
@@ -17,23 +17,55 @@ struct FlakeInput;

 typedef std::map<FlakeId, FlakeInput> FlakeInputs;

+/* FlakeInput is the 'Flake'-level parsed form of the "input" entries
+ * in the flake file.
+ *
+ * A FlakeInput is normally constructed by the 'parseFlakeInput'
+ * function which parses the input specification in the '.flake' file
+ * to create a 'FlakeRef' (a fetcher, the fetcher-specific
+ * representation of the input specification, and possibly the fetched
+ * local store path result) and then creating this FlakeInput to hold
+ * that FlakeRef, along with anything that might override that
+ * FlakeRef (like command-line overrides or "follows" specifications).
+ *
+ * A FlakeInput is also sometimes constructed directly from a FlakeRef
+ * instead of starting at the flake-file input specification
+ * (e.g. overrides, follows, and implicit inputs).
+ *
+ * A FlakeInput will usually have one of either "ref" or "follows"
+ * set. If not otherwise specified, a "ref" will be generated to a
+ * 'type="indirect"' flake, which is treated as simply the name of a
+ * flake to be resolved in the registry.
+ */
+
 struct FlakeInput
 {
     std::optional<FlakeRef> ref;
-    bool isFlake = true;
+    bool isFlake = true; // true = process flake to get outputs, false = (fetched) static source path
     std::optional<InputPath> follows;
     bool absolute = false; // whether 'follows' is relative to the flake root
     FlakeInputs overrides;
 };

+struct ConfigFile
+{
+    using ConfigValue = std::variant<std::string, int64_t, Explicit<bool>, std::vector<std::string>>;
+
+    std::map<std::string, ConfigValue> settings;
+
+    void apply();
+};
+
+/* The contents of a flake.nix file. */
 struct Flake
 {
-    FlakeRef originalRef;
-    FlakeRef resolvedRef;
-    FlakeRef lockedRef;
+    FlakeRef originalRef; // the original flake specification (by the user)
+    FlakeRef resolvedRef; // registry references and caching resolved to the specific underlying flake
+    FlakeRef lockedRef; // the specific local store result of invoking the fetcher
     std::optional<std::string> description;
     std::shared_ptr<const fetchers::Tree> sourceInfo;
     FlakeInputs inputs;
+    ConfigFile config; // 'nixConfig' attribute
     ~Flake();
 };
@@ -12,10 +12,33 @@ class Store;

 typedef std::string FlakeId;

+/* A flake reference specifies how to fetch a flake or raw source
+ * (e.g. from a Git repository). It is created from a URL-like syntax
+ * (e.g. 'github:NixOS/patchelf'), an attrset representation (e.g. '{
+ * type="github"; owner = "NixOS"; repo = "patchelf"; }'), or a local
+ * path.
+ *
+ * Each flake will have a number of FlakeRef objects: one for each
+ * input to the flake.
+ *
+ * The normal method of constructing a FlakeRef is by starting with an
+ * input description (usually the attrs or a url from the flake file),
+ * locating a fetcher for that input, and then capturing the Input
+ * object that fetcher generates (usually via
+ * FlakeRef::fromAttrs(attrs) or parseFlakeRef(url) calls).
+ *
+ * The actual fetch may not have been performed yet (i.e. a FlakeRef may
+ * be lazy), but the fetcher can be invoked at any time via the
+ * FlakeRef to ensure the store is populated with this input.
+ */
+
 struct FlakeRef
 {
+    /* fetcher-specific representation of the input, sufficient to
+       perform the fetch operation. */
     fetchers::Input input;

+    /* sub-path within the fetched input that represents this input */
     Path subdir;

     bool operator==(const FlakeRef & other) const;
@@ -34,7 +34,8 @@ LockedNode::LockedNode(const nlohmann::json & json)
     , isFlake(json.find("flake") != json.end() ? (bool) json["flake"] : true)
 {
     if (!lockedRef.input.isImmutable())
-        throw Error("lockfile contains mutable lock '%s'", attrsToJson(lockedRef.input.toAttrs()));
+        throw Error("lockfile contains mutable lock '%s'",
+            fetchers::attrsToJSON(lockedRef.input.toAttrs()));
 }

 StorePath LockedNode::computeStorePath(Store & store) const

@@ -77,7 +78,7 @@ LockFile::LockFile(const nlohmann::json & json, const Path & path)
     {
         if (jsonNode.find("inputs") == jsonNode.end()) return;
         for (auto & i : jsonNode["inputs"].items()) {
-            if (i.value().is_array()) {
+            if (i.value().is_array()) { // FIXME: remove, obsolete
                 InputPath path;
                 for (auto & j : i.value())
                     path.push_back(j);

@@ -86,10 +87,13 @@ LockFile::LockFile(const nlohmann::json & json, const Path & path)
                 std::string inputKey = i.value();
                 auto k = nodeMap.find(inputKey);
                 if (k == nodeMap.end()) {
-                    auto jsonNode2 = json["nodes"][inputKey];
-                    auto input = std::make_shared<LockedNode>(jsonNode2);
+                    auto nodes = json["nodes"];
+                    auto jsonNode2 = nodes.find(inputKey);
+                    if (jsonNode2 == nodes.end())
+                        throw Error("lock file references missing node '%s'", inputKey);
+                    auto input = std::make_shared<LockedNode>(*jsonNode2);
                     k = nodeMap.insert_or_assign(inputKey, input).first;
-                    getInputs(*input, jsonNode2);
+                    getInputs(*input, *jsonNode2);
                 }
                 if (auto child = std::dynamic_pointer_cast<LockedNode>(k->second))
                     node.inputs.insert_or_assign(i.key(), child);

@@ -110,7 +114,7 @@ LockFile::LockFile(const nlohmann::json & json, const Path & path)
     // a bit since we don't need to worry about cycles.
 }

-nlohmann::json LockFile::toJson() const
+nlohmann::json LockFile::toJSON() const
 {
     nlohmann::json nodes;
     std::unordered_map<std::shared_ptr<const Node>, std::string> nodeKeys;

@@ -154,8 +158,8 @@ nlohmann::json LockFile::toJson() const
         }

         if (auto lockedNode = std::dynamic_pointer_cast<const LockedNode>(node)) {
-            n["original"] = fetchers::attrsToJson(lockedNode->originalRef.toAttrs());
-            n["locked"] = fetchers::attrsToJson(lockedNode->lockedRef.toAttrs());
+            n["original"] = fetchers::attrsToJSON(lockedNode->originalRef.toAttrs());
+            n["locked"] = fetchers::attrsToJSON(lockedNode->lockedRef.toAttrs());
             if (!lockedNode->isFlake) n["flake"] = false;
         }

@@ -174,7 +178,7 @@ nlohmann::json LockFile::toJson() const

 std::string LockFile::to_string() const
 {
-    return toJson().dump(2);
+    return toJSON().dump(2);
 }

 LockFile LockFile::read(const Path & path)

@@ -185,7 +189,7 @@ LockFile LockFile::read(const Path & path)

 std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile)
 {
-    stream << lockFile.toJson().dump(2);
+    stream << lockFile.toJSON().dump(2);
     return stream;
 }

@@ -223,7 +227,7 @@ bool LockFile::isImmutable() const
 bool LockFile::operator ==(const LockFile & other) const
 {
     // FIXME: slow
-    return toJson() == other.toJson();
+    return toJSON() == other.toJSON();
 }

 InputPath parseInputPath(std::string_view s)

@@ -52,7 +52,7 @@ struct LockFile
     LockFile() {};
     LockFile(const nlohmann::json & json, const Path & path);

-    nlohmann::json toJson() const;
+    nlohmann::json toJSON() const;

     std::string to_string() const;
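The switch from json["nodes"][inputKey] to find() above is defensive: with nlohmann::json, operator[] on a missing object key either inserts a null (non-const access) or is undefined behavior (const access), while find() reports absence and allows a clean error. A small sketch:

    #include <iostream>
    #include <nlohmann/json.hpp>

    int main()
    {
        auto nodes = nlohmann::json::parse(R"({"root": {"inputs": {}}})");

        auto it = nodes.find("nixpkgs");
        if (it == nodes.end()) {
            // mirrors: throw Error("lock file references missing node '%s'", ...)
            std::cerr << "lock file references missing node 'nixpkgs'\n";
            return 1;
        }
        std::cout << it->dump() << "\n";
    }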
@@ -12,6 +12,10 @@

 %{
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wunneeded-internal-declaration"
+#endif
+
 #include <boost/lexical_cast.hpp>

 #include "nixexpr.hh"
@@ -15,7 +15,7 @@ libexpr_CXXFLAGS += -I src/libutil -I src/libstore -I src/libfetchers -I src/lib

 libexpr_LIBS = libutil libstore libfetchers

-libexpr_LDFLAGS =
+libexpr_LDFLAGS = -lboost_context
 ifneq ($(OS), FreeBSD)
  libexpr_LDFLAGS += -ldl
 endif

@@ -35,8 +35,6 @@ $(d)/lexer-tab.cc $(d)/lexer-tab.hh: $(d)/lexer.l

 clean-files += $(d)/parser-tab.cc $(d)/parser-tab.hh $(d)/lexer-tab.cc $(d)/lexer-tab.hh

-dist-files += $(d)/parser-tab.cc $(d)/parser-tab.hh $(d)/lexer-tab.cc $(d)/lexer-tab.hh
-
 $(eval $(call install-file-in, $(d)/nix-expr.pc, $(prefix)/lib/pkgconfig, 0644))

 $(foreach i, $(wildcard src/libexpr/flake/*.hh), \
@@ -1089,8 +1089,9 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *

         // Regular, non-CA derivation should always return a single hash and not
         // hash per output.
-        Hash h = std::get<0>(hashDerivationModulo(*state.store, Derivation(drv), true));
+        auto hashModulo = hashDerivationModulo(*state.store, Derivation(drv), true);
+        std::visit(overloaded {
+            [&](Hash h) {
                 for (auto & i : outputs) {
                     auto outPath = state.store->makeOutputPath(i, h, drvName);
                     drv.env[i] = state.store->printStorePath(outPath);
@@ -1101,6 +1102,22 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
                     },
                 });
             }
+            },
+            [&](CaOutputHashes) {
+                // Shouldn't happen as the toplevel derivation is not CA.
+                assert(false);
+            },
+            [&](DeferredHash _) {
+                for (auto & i : outputs) {
+                    drv.outputs.insert_or_assign(i,
+                        DerivationOutput {
+                            .output = DerivationOutputDeferred{},
+                        });
+                }
+            },
+        },
+        hashModulo);

     }

     /* Write the resulting term into the Nix store directory. */
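The hunk above switches prim_derivationStrict from extracting a single hash to visiting a variant: hashDerivationModulo now yields one of a plain Hash, per-output CA hashes, or a deferred marker, and each case gets its own lambda. A minimal sketch of the `overloaded` visitor idiom this relies on (generic C++17; the real helper lives in Nix's utility headers):

    #include <iostream>
    #include <string>
    #include <variant>

    // Merge several lambdas into a single overload set for std::visit.
    template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
    template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;  // deduction guide

    struct DeferredMarker {};

    int main()
    {
        std::variant<std::string, DeferredMarker> hashModulo = DeferredMarker{};
        std::visit(overloaded {
            [](const std::string & h) { std::cout << "single hash: " << h << "\n"; },
            [](DeferredMarker)        { std::cout << "hash deferred until build\n"; },
        }, hashModulo);
    }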
@@ -1115,9 +1132,10 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *

        However, we don't bother doing this for floating CA derivations because
        their "hash modulo" is indeterminate until built. */
-    if (drv.type() != DerivationType::CAFloating)
-        drvHashes.insert_or_assign(drvPath,
-            hashDerivationModulo(*state.store, Derivation(drv), false));
+    if (drv.type() != DerivationType::CAFloating) {
+        auto h = hashDerivationModulo(*state.store, Derivation(drv), false);
+        drvHashes.lock()->insert_or_assign(drvPath, h);
+    }

     state.mkAttrs(v, 1 + drv.outputs.size());
     mkString(*state.allocAttr(v, state.sDrvPath), drvPathS, {"=" + drvPathS});
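Note the access pattern change: drvHashes.insert_or_assign(...) becomes drvHashes.lock()->insert_or_assign(...), which indicates the global map is now behind a mutex wrapper (Nix has a Sync<T> template for this). A rough sketch of that pattern, assuming only that lock() returns an RAII guard exposing the value:

    #include <map>
    #include <mutex>
    #include <string>

    // Stand-in for a Sync<T>-style wrapper: lock() hands out a guard that
    // holds the mutex for as long as the caller keeps the guard alive.
    template<typename T>
    class Synced {
        T value;
        std::mutex mtx;
    public:
        struct Guard {
            std::unique_lock<std::mutex> held;
            T * value;
            T * operator ->() { return value; }
        };
        Guard lock() { return Guard{std::unique_lock<std::mutex>(mtx), &value}; }
    };

    Synced<std::map<std::string, int>> drvHashesLike;

    int main()
    {
        // Same shape as: drvHashes.lock()->insert_or_assign(drvPath, h);
        drvHashesLike.lock()->insert_or_assign("example.drv", 42);
    }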
@@ -1603,7 +1621,12 @@ static RegisterPrimOp primop_toJSON({
 static void prim_fromJSON(EvalState & state, const Pos & pos, Value * * args, Value & v)
 {
     string s = state.forceStringNoCtx(*args[0], pos);
-    parseJSON(state, s, v);
+    try {
+        parseJSON(state, s, v);
+    } catch (JSONParseError &e) {
+        e.addTrace(pos, "while decoding a JSON string");
+        throw e;
+    }
 }

 static RegisterPrimOp primop_fromJSON({
@@ -39,11 +39,12 @@ void emitTreeAttrs(
         // Backwards compat for `builtins.fetchGit`: dirty repos return an empty sha1 as rev
         auto emptyHash = Hash(htSHA1);
         mkString(*state.allocAttr(v, state.symbols.create("rev")), emptyHash.gitRev());
-        mkString(*state.allocAttr(v, state.symbols.create("shortRev")), emptyHash.gitRev());
+        mkString(*state.allocAttr(v, state.symbols.create("shortRev")), emptyHash.gitShortRev());
     }

     if (input.getType() == "git")
-        mkBool(*state.allocAttr(v, state.symbols.create("submodules")), maybeGetBoolAttr(input.attrs, "submodules").value_or(false));
+        mkBool(*state.allocAttr(v, state.symbols.create("submodules")),
+            fetchers::maybeGetBoolAttr(input.attrs, "submodules").value_or(false));

     if (auto revCount = input.getRevCount())
         mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *revCount);
@@ -101,7 +102,7 @@ static void fetchTree(
         else if (attr.value->type == tString)
             addURI(state, attrs, attr.name, attr.value->string.s);
         else if (attr.value->type == tBool)
-            attrs.emplace(attr.name, fetchers::Explicit<bool>{attr.value->boolean});
+            attrs.emplace(attr.name, Explicit<bool>{attr.value->boolean});
         else if (attr.value->type == tInt)
             attrs.emplace(attr.name, attr.value->integer);
         else
@@ -211,7 +212,7 @@ static void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v,
             ? state.store->queryPathInfo(storePath)->narHash
             : hashFile(htSHA256, path);
         if (hash != *expectedHash)
-            throw Error((unsigned int) 102, "hash mismatch in file downloaded from '%s':\n wanted: %s\n got: %s",
+            throw Error((unsigned int) 102, "hash mismatch in file downloaded from '%s':\n specified: %s\n got: %s",
                 *url, expectedHash->to_string(Base32, true), hash.to_string(Base32, true));
     }

@@ -23,7 +23,7 @@ Attrs jsonToAttrs(const nlohmann::json & json)
     return attrs;
 }

-nlohmann::json attrsToJson(const Attrs & attrs)
+nlohmann::json attrsToJSON(const Attrs & attrs)
 {
     nlohmann::json json;
     for (auto & attr : attrs) {
@@ -44,7 +44,7 @@ std::optional<std::string> maybeGetStrAttr(const Attrs & attrs, const std::strin
     if (i == attrs.end()) return {};
     if (auto v = std::get_if<std::string>(&i->second))
         return *v;
-    throw Error("input attribute '%s' is not a string %s", name, attrsToJson(attrs).dump());
+    throw Error("input attribute '%s' is not a string %s", name, attrsToJSON(attrs).dump());
 }

 std::string getStrAttr(const Attrs & attrs, const std::string & name)
@@ -8,24 +8,12 @@

 namespace nix::fetchers {

-/* Wrap bools to prevent string literals (i.e. 'char *') from being
-   cast to a bool in Attr. */
-template<typename T>
-struct Explicit {
-    T t;
-
-    bool operator ==(const Explicit<T> & other) const
-    {
-        return t == other.t;
-    }
-};
-
 typedef std::variant<std::string, uint64_t, Explicit<bool>> Attr;
 typedef std::map<std::string, Attr> Attrs;

 Attrs jsonToAttrs(const nlohmann::json & json);

-nlohmann::json attrsToJson(const Attrs & attrs);
+nlohmann::json attrsToJSON(const Attrs & attrs);

 std::optional<std::string> maybeGetStrAttr(const Attrs & attrs, const std::string & name);
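The deleted comment documents why Explicit exists at all: a string literal is a `char *`, which converts implicitly to `bool`, so a bare `bool` alternative in the Attr variant could silently capture strings. Since `Explicit<bool>` is still used in the typedef below, the template has presumably moved to a shared header rather than been dropped. A small self-contained illustration of the pitfall (names invented for the demo):

    #include <iostream>
    #include <string>

    void set(bool b)                { std::cout << "bool: " << b << "\n"; }
    void set(const std::string & s) { std::cout << "string: " << s << "\n"; }

    struct ExplicitBool { bool b; };    // no implicit char* -> bool route
    void setSafe(ExplicitBool e)        { std::cout << "bool: " << e.b << "\n"; }
    void setSafe(const std::string & s) { std::cout << "string: " << s << "\n"; }

    int main()
    {
        // Prints "bool: 1": char* -> bool is a standard conversion and
        // beats the user-defined conversion to std::string.
        set("yes");

        setSafe(std::string("yes"));    // unambiguous string
        setSafe(ExplicitBool{true});    // bools must be spelled out
    }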
@@ -55,8 +55,8 @@ struct CacheImpl : Cache
         bool immutable) override
     {
         _state.lock()->add.use()
-            (attrsToJson(inAttrs).dump())
-            (attrsToJson(infoAttrs).dump())
+            (attrsToJSON(inAttrs).dump())
+            (attrsToJSON(infoAttrs).dump())
             (store->printStorePath(storePath))
             (immutable)
             (time(0)).exec();
@@ -70,7 +70,7 @@ struct CacheImpl : Cache
             if (!res->expired)
                 return std::make_pair(std::move(res->infoAttrs), std::move(res->storePath));
             debug("ignoring expired cache entry '%s'",
-                attrsToJson(inAttrs).dump());
+                attrsToJSON(inAttrs).dump());
         }
         return {};
     }
@@ -81,15 +81,15 @@ struct CacheImpl : Cache
     {
         auto state(_state.lock());

-        auto inAttrsJson = attrsToJson(inAttrs).dump();
+        auto inAttrsJSON = attrsToJSON(inAttrs).dump();

-        auto stmt(state->lookup.use()(inAttrsJson));
+        auto stmt(state->lookup.use()(inAttrsJSON));
         if (!stmt.next()) {
-            debug("did not find cache entry for '%s'", inAttrsJson);
+            debug("did not find cache entry for '%s'", inAttrsJSON);
             return {};
         }

-        auto infoJson = stmt.getStr(0);
+        auto infoJSON = stmt.getStr(0);
         auto storePath = store->parseStorePath(stmt.getStr(1));
         auto immutable = stmt.getInt(2) != 0;
         auto timestamp = stmt.getInt(3);
@@ -97,16 +97,16 @@ struct CacheImpl : Cache
         store->addTempRoot(storePath);
         if (!store->isValidPath(storePath)) {
             // FIXME: we could try to substitute 'storePath'.
-            debug("ignoring disappeared cache entry '%s'", inAttrsJson);
+            debug("ignoring disappeared cache entry '%s'", inAttrsJSON);
             return {};
         }

         debug("using cache entry '%s' -> '%s', '%s'",
-            inAttrsJson, infoJson, store->printStorePath(storePath));
+            inAttrsJSON, infoJSON, store->printStorePath(storePath));

         return Result {
             .expired = !immutable && (settings.tarballTtl.get() == 0 || timestamp + settings.tarballTtl < time(0)),
-            .infoAttrs = jsonToAttrs(nlohmann::json::parse(infoJson)),
+            .infoAttrs = jsonToAttrs(nlohmann::json::parse(infoJSON)),
             .storePath = std::move(storePath)
         };
     }
@@ -65,7 +65,7 @@ Input Input::fromAttrs(Attrs && attrs)
 ParsedURL Input::toURL() const
 {
     if (!scheme)
-        throw Error("cannot show unsupported input '%s'", attrsToJson(attrs));
+        throw Error("cannot show unsupported input '%s'", attrsToJSON(attrs));
     return scheme->toURL(*this);
 }

@@ -110,7 +110,7 @@ bool Input::contains(const Input & other) const
 std::pair<Tree, Input> Input::fetch(ref<Store> store) const
 {
     if (!scheme)
-        throw Error("cannot fetch unsupported input '%s'", attrsToJson(toAttrs()));
+        throw Error("cannot fetch unsupported input '%s'", attrsToJSON(toAttrs()));

     /* The tree may already be in the Nix store, or it could be
        substituted (which is often faster than fetching from the
@@ -247,7 +247,7 @@ std::optional<time_t> Input::getLastModified() const

 ParsedURL InputScheme::toURL(const Input & input)
 {
-    throw Error("don't know how to convert input '%s' to a URL", attrsToJson(input.attrs));
+    throw Error("don't know how to convert input '%s' to a URL", attrsToJSON(input.attrs));
 }

 Input InputScheme::applyOverrides(
@@ -21,6 +21,14 @@ struct Tree

 struct InputScheme;

+/* The Input object is generated by a specific fetcher, based on the
+ * user-supplied input attribute in the flake.nix file, and contains
+ * the information that the specific fetcher needs to perform the
+ * actual fetch.  The Input object is most commonly created via the
+ * "fromURL()" or "fromAttrs()" static functions which are provided
+ * the url or attrset specified in the flake file.
+ */
+
 struct Input
 {
     friend struct InputScheme;
@@ -84,6 +92,16 @@ public:
     std::optional<time_t> getLastModified() const;
 };

+
+/* The InputScheme represents a type of fetcher.  Each fetcher
+ * registers with nix at startup time.  When processing an input for a
+ * flake, each scheme is given an opportunity to "recognize" that
+ * input from the url or attributes in the flake file's specification
+ * and return an Input object to represent the input if it is
+ * recognized.  The Input object contains the information the fetcher
+ * needs to actually perform the "fetch()" when called.
+ */
 struct InputScheme
 {
     virtual ~InputScheme()
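The two new comments describe a recognize-then-fetch protocol: every registered scheme inspects the URL or attrset, and the first one that recognizes it returns an Input carrying everything needed for the later fetch(). A toy model of that dispatch loop, independent of the real classes (all names here are invented):

    #include <iostream>
    #include <map>
    #include <optional>
    #include <string>
    #include <vector>

    struct MiniInput { std::map<std::string, std::string> attrs; };

    struct MiniScheme {
        virtual ~MiniScheme() = default;
        // Return an input if this scheme recognizes the URL, nullopt to decline.
        virtual std::optional<MiniInput> inputFromURL(const std::string & url) = 0;
    };

    struct GitLikeScheme : MiniScheme {
        std::optional<MiniInput> inputFromURL(const std::string & url) override
        {
            if (url.rfind("git+", 0) != 0) return {};  // not ours; let the next scheme try
            return MiniInput{{{"type", "git"}, {"url", url.substr(4)}}};
        }
    };

    int main()
    {
        GitLikeScheme git;
        std::vector<MiniScheme *> schemes{&git};
        for (auto * s : schemes)
            if (auto input = s->inputFromURL("git+https://example.org/repo")) {
                std::cout << "recognized as type " << input->attrs["type"] << "\n";
                break;
            }
    }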
@@ -273,7 +273,7 @@ struct GitInputScheme : InputScheme
                 haveCommits ? std::stoull(runProgram("git", true, { "-C", actualUrl, "log", "-1", "--format=%ct", "--no-show-signature", "HEAD" })) : 0);

             return {
-                Tree(store->printStorePath(storePath), std::move(storePath)),
+                Tree(store->toRealPath(storePath), std::move(storePath)),
                 input
             };
         }
@@ -11,6 +11,36 @@ using namespace std::string_literals;

 namespace nix::fetchers {

+namespace {
+
+RunOptions hgOptions(const Strings & args) {
+    RunOptions opts("hg", args);
+    opts.searchPath = true;
+
+    auto env = getEnv();
+    // Set HGPLAIN: this means we get consistent output from hg and avoids leakage from a user or system .hgrc.
+    env["HGPLAIN"] = "";
+    opts.environment = env;
+
+    return opts;
+}
+
+// runProgram wrapper that uses hgOptions instead of stock RunOptions.
+string runHg(const Strings & args, const std::optional<std::string> & input = {})
+{
+    RunOptions opts = hgOptions(args);
+    opts.input = input;
+
+    auto res = runProgram(opts);
+
+    if (!statusOk(res.first))
+        throw ExecError(res.first, fmt("hg %1%", statusToString(res.first)));
+
+    return res.second;
+}
+
+}
+
 struct MercurialInputScheme : InputScheme
 {
     std::optional<Input> inputFromURL(const ParsedURL & url) override
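HGPLAIN is Mercurial's documented switch for machine-readable behavior: when it is set in the environment (even to the empty string), hg ignores user aliases, custom templates and localization from ~/.hgrc, so output such as "hg branch" and "hg log --template" stays parseable. Funneling every invocation through hgOptions()/runHg() makes that guarantee uniform across call sites. A crude standalone equivalent of the same idea (uses POSIX setenv; the real code above builds an explicit environment map instead):

    #include <cstdlib>

    int main()
    {
        // An empty value is enough: Mercurial only checks that HGPLAIN is set.
        setenv("HGPLAIN", "", 1);
        // Every hg child process spawned from here inherits plain mode, so
        // its output format no longer depends on the user's .hgrc.
        return std::system("hg version -q");
    }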
@@ -100,11 +130,11 @@ struct MercurialInputScheme : InputScheme
         assert(sourcePath);

         // FIXME: shut up if file is already tracked.
-        runProgram("hg", true,
+        runHg(
             { "add", *sourcePath + "/" + std::string(file) });

         if (commitMsg)
-            runProgram("hg", true,
+            runHg(
                 { "commit", *sourcePath + "/" + std::string(file), "-m", *commitMsg });
     }

@@ -130,7 +160,7 @@ struct MercurialInputScheme : InputScheme

         if (!input.getRef() && !input.getRev() && isLocal && pathExists(actualUrl + "/.hg")) {

-            bool clean = runProgram("hg", true, { "status", "-R", actualUrl, "--modified", "--added", "--removed" }) == "";
+            bool clean = runHg({ "status", "-R", actualUrl, "--modified", "--added", "--removed" }) == "";

             if (!clean) {

@@ -143,10 +173,10 @@ struct MercurialInputScheme : InputScheme
                 if (settings.warnDirty)
                     warn("Mercurial tree '%s' is unclean", actualUrl);

-                input.attrs.insert_or_assign("ref", chomp(runProgram("hg", true, { "branch", "-R", actualUrl })));
+                input.attrs.insert_or_assign("ref", chomp(runHg({ "branch", "-R", actualUrl })));

                 auto files = tokenizeString<std::set<std::string>>(
-                    runProgram("hg", true, { "status", "-R", actualUrl, "--clean", "--modified", "--added", "--no-status", "--print0" }), "\0"s);
+                    runHg({ "status", "-R", actualUrl, "--clean", "--modified", "--added", "--no-status", "--print0" }), "\0"s);

                 PathFilter filter = [&](const Path & p) -> bool {
                     assert(hasPrefix(p, actualUrl));
|
||||||
auto storePath = store->addToStore("source", actualUrl, FileIngestionMethod::Recursive, htSHA256, filter);
|
auto storePath = store->addToStore("source", actualUrl, FileIngestionMethod::Recursive, htSHA256, filter);
|
||||||
|
|
||||||
return {
|
return {
|
||||||
Tree(store->printStorePath(storePath), std::move(storePath)),
|
Tree(store->toRealPath(storePath), std::move(storePath)),
|
||||||
input
|
input
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
@@ -224,33 +254,33 @@ struct MercurialInputScheme : InputScheme
         if (!(input.getRev()
                 && pathExists(cacheDir)
                 && runProgram(
-                    RunOptions("hg", { "log", "-R", cacheDir, "-r", input.getRev()->gitRev(), "--template", "1" })
+                    hgOptions({ "log", "-R", cacheDir, "-r", input.getRev()->gitRev(), "--template", "1" })
                     .killStderr(true)).second == "1"))
         {
             Activity act(*logger, lvlTalkative, actUnknown, fmt("fetching Mercurial repository '%s'", actualUrl));

             if (pathExists(cacheDir)) {
                 try {
-                    runProgram("hg", true, { "pull", "-R", cacheDir, "--", actualUrl });
+                    runHg({ "pull", "-R", cacheDir, "--", actualUrl });
                 }
                 catch (ExecError & e) {
                     string transJournal = cacheDir + "/.hg/store/journal";
                     /* hg throws "abandoned transaction" error only if this file exists */
                     if (pathExists(transJournal)) {
-                        runProgram("hg", true, { "recover", "-R", cacheDir });
-                        runProgram("hg", true, { "pull", "-R", cacheDir, "--", actualUrl });
+                        runHg({ "recover", "-R", cacheDir });
+                        runHg({ "pull", "-R", cacheDir, "--", actualUrl });
                     } else {
                         throw ExecError(e.status, fmt("'hg pull' %s", statusToString(e.status)));
                     }
                 }
             } else {
                 createDirs(dirOf(cacheDir));
-                runProgram("hg", true, { "clone", "--noupdate", "--", actualUrl, cacheDir });
+                runHg({ "clone", "--noupdate", "--", actualUrl, cacheDir });
             }
         }

         auto tokens = tokenizeString<std::vector<std::string>>(
-            runProgram("hg", true, { "log", "-R", cacheDir, "-r", revOrRef, "--template", "{node} {rev} {branch}" }));
+            runHg({ "log", "-R", cacheDir, "-r", revOrRef, "--template", "{node} {rev} {branch}" }));
         assert(tokens.size() == 3);

         input.attrs.insert_or_assign("rev", Hash::parseAny(tokens[0], htSHA1).gitRev());
@@ -263,7 +293,7 @@ struct MercurialInputScheme : InputScheme
         Path tmpDir = createTempDir();
         AutoDelete delTmpDir(tmpDir, true);

-        runProgram("hg", true, { "archive", "-R", cacheDir, "-r", input.getRev()->gitRev(), tmpDir });
+        runHg({ "archive", "-R", cacheDir, "-r", input.getRev()->gitRev(), tmpDir });

         deletePath(tmpDir + "/.hg_archival.txt");

@@ -60,10 +60,10 @@ void Registry::write(const Path & path)
     nlohmann::json arr;
     for (auto & entry : entries) {
         nlohmann::json obj;
-        obj["from"] = attrsToJson(entry.from.toAttrs());
-        obj["to"] = attrsToJson(entry.to.toAttrs());
+        obj["from"] = attrsToJSON(entry.from.toAttrs());
+        obj["to"] = attrsToJSON(entry.to.toAttrs());
         if (!entry.extraAttrs.empty())
-            obj["to"].update(attrsToJson(entry.extraAttrs));
+            obj["to"].update(attrsToJSON(entry.extraAttrs));
         if (entry.exact)
             obj["exact"] = true;
         arr.emplace_back(std::move(obj));
@@ -44,7 +44,7 @@ MixCommonArgs::MixCommonArgs(const string & programName)
                 globalConfig.getSettings(settings);
                 for (auto & s : settings)
                     if (hasPrefix(s.first, prefix))
-                        completions->add(s.first, s.second.description);
+                        completions->add(s.first, fmt("Set the `%s` setting.", s.first));
             }
         }
     });
@@ -12,7 +12,7 @@ LogFormat parseLogFormat(const std::string & logFormatStr) {
     else if (logFormatStr == "raw-with-logs")
         return LogFormat::rawWithLogs;
     else if (logFormatStr == "internal-json")
-        return LogFormat::internalJson;
+        return LogFormat::internalJSON;
     else if (logFormatStr == "bar")
         return LogFormat::bar;
     else if (logFormatStr == "bar-with-logs")
|
||||||
return makeSimpleLogger(false);
|
return makeSimpleLogger(false);
|
||||||
case LogFormat::rawWithLogs:
|
case LogFormat::rawWithLogs:
|
||||||
return makeSimpleLogger(true);
|
return makeSimpleLogger(true);
|
||||||
case LogFormat::internalJson:
|
case LogFormat::internalJSON:
|
||||||
return makeJSONLogger(*makeSimpleLogger(true));
|
return makeJSONLogger(*makeSimpleLogger(true));
|
||||||
case LogFormat::bar:
|
case LogFormat::bar:
|
||||||
return makeProgressBar();
|
return makeProgressBar();
|
||||||
|
|
|
@@ -7,7 +7,7 @@ namespace nix {
 enum class LogFormat {
     raw,
     rawWithLogs,
-    internalJson,
+    internalJSON,
     bar,
     barWithLogs,
 };

|
||||||
Logger::writeToStdout(s);
|
Logger::writeToStdout(s);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
std::optional<char> ask(std::string_view msg) override
|
||||||
|
{
|
||||||
|
auto state(state_.lock());
|
||||||
|
if (!state->active || !isatty(STDIN_FILENO)) return {};
|
||||||
|
std::cerr << fmt("\r\e[K%s ", msg);
|
||||||
|
auto s = trim(readLine(STDIN_FILENO));
|
||||||
|
if (s.size() != 1) return {};
|
||||||
|
draw(*state);
|
||||||
|
return s[0];
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
Logger * makeProgressBar(bool printBuildLogs)
|
Logger * makeProgressBar(bool printBuildLogs)
|
||||||
|
|
|
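The new ask() override gives the progress bar a way to pose one-character questions without corrupting its own output: it clears the current line, reads a single character, redraws, and returns nothing at all when stdin is not a terminal. A hypothetical caller built on just that contract (the LoggerLike shim below stands in for the real Logger interface this hunk extends):

    #include <optional>
    #include <string_view>

    // Minimal stand-in for the extended interface: ask() yields one
    // character, or nullopt when no interactive terminal is attached.
    struct LoggerLike {
        virtual std::optional<char> ask(std::string_view msg) = 0;
        virtual ~LoggerLike() = default;
    };

    // Never blocks in scripts or CI: without a TTY, ask() returns nullopt
    // and we conservatively answer "no".
    bool confirm(LoggerLike & logger, std::string_view question)
    {
        auto c = logger.ask(question);
        return c && (*c == 'y' || *c == 'Y');
    }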
@@ -86,8 +86,7 @@ void BinaryCacheStore::getFile(const std::string & path, Sink & sink)
                 promise.set_exception(std::current_exception());
             }
         }});
-    auto data = promise.get_future().get();
-    sink((unsigned char *) data->data(), data->size());
+    sink(*promise.get_future().get());
 }

 std::shared_ptr<std::string> BinaryCacheStore::getFile(const std::string & path)
@@ -434,7 +433,9 @@ StorePath BinaryCacheStore::addTextToStore(const string & name, const string & s
     if (!repair && isValidPath(path))
         return path;

-    auto source = StringSource { s };
+    StringSink sink;
+    dumpString(s, sink);
+    auto source = StringSource { *sink.s };
     return addToStoreCommon(source, repair, CheckSigs, [&](HashResult nar) {
         ValidPathInfo info { path, nar.first };
         info.narSize = nar.second;
|
||||||
})->path;
|
})->path;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
std::optional<const Realisation> BinaryCacheStore::queryRealisation(const DrvOutput & id)
|
||||||
|
{
|
||||||
|
auto outputInfoFilePath = realisationsPrefix + "/" + id.to_string() + ".doi";
|
||||||
|
auto rawOutputInfo = getFile(outputInfoFilePath);
|
||||||
|
|
||||||
|
if (rawOutputInfo) {
|
||||||
|
return {Realisation::fromJSON(
|
||||||
|
nlohmann::json::parse(*rawOutputInfo), outputInfoFilePath)};
|
||||||
|
} else {
|
||||||
|
return std::nullopt;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void BinaryCacheStore::registerDrvOutput(const Realisation& info) {
|
||||||
|
auto filePath = realisationsPrefix + "/" + info.id.to_string() + ".doi";
|
||||||
|
upsertFile(filePath, info.toJSON().dump(), "application/json");
|
||||||
|
}
|
||||||
|
|
||||||
ref<FSAccessor> BinaryCacheStore::getFSAccessor()
|
ref<FSAccessor> BinaryCacheStore::getFSAccessor()
|
||||||
{
|
{
|
||||||
return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()), localNarCache);
|
return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()), localNarCache);
|
||||||
|
|
|
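Together these two methods define the binary cache's on-disk layout for build-trace entries: each Realisation is serialised as JSON and stored as a single object, keyed by the DrvOutput id under /realisations with a ".doi" suffix. Sketching the key construction with invented values (the "<hash>!<output>" id shape matches DrvOutput::to_string() elsewhere in the codebase at this time, but treat it as an assumption here):

    #include <iostream>
    #include <string>

    int main()
    {
        std::string realisationsPrefix = "/realisations";
        // Hypothetical DrvOutput id: "<derivation hash>!<output name>" (hash elided).
        std::string id = "sha256:1234...!out";
        std::string key = realisationsPrefix + "/" + id + ".doi";
        std::cout << key << "\n";  // object holding the JSON-serialised Realisation
    }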
@@ -33,6 +33,9 @@ private:

 protected:

+    // The prefix under which realisation infos will be stored
+    const std::string realisationsPrefix = "/realisations";
+
     BinaryCacheStore(const Params & params);

 public:
@@ -99,6 +102,10 @@ public:
     StorePath addTextToStore(const string & name, const string & s,
         const StorePathSet & references, RepairFlag repair) override;

+    void registerDrvOutput(const Realisation & info) override;
+
+    std::optional<const Realisation> queryRealisation(const DrvOutput &) override;
+
     void narFromPath(const StorePath & path, Sink & sink) override;

     BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,

File diff suppressed because it is too large

src/libstore/build/derivation-goal.hh (new file, 369 lines)
@@ -0,0 +1,369 @@
+#pragma once
+
+#include "parsed-derivations.hh"
+#include "lock.hh"
+#include "local-store.hh"
+#include "goal.hh"
+
+namespace nix {
+
+using std::map;
+
+struct HookInstance;
+
+typedef enum {rpAccept, rpDecline, rpPostpone} HookReply;
+
+/* Unless we are repairing, we don't bother to test validity and just assume it,
+   so the choices are `Absent` or `Valid`. */
+enum struct PathStatus {
+    Corrupt,
+    Absent,
+    Valid,
+};
+
+struct InitialOutputStatus {
+    StorePath path;
+    PathStatus status;
+    /* Valid in the store, and additionally non-corrupt if we are repairing */
+    bool isValid() const {
+        return status == PathStatus::Valid;
+    }
+    /* Merely present, allowed to be corrupt */
+    bool isPresent() const {
+        return status == PathStatus::Corrupt
+            || status == PathStatus::Valid;
+    }
+};
+
+struct InitialOutput {
+    bool wanted;
+    std::optional<InitialOutputStatus> known;
+};
+
+struct DerivationGoal : public Goal
+{
+    /* Whether to use an on-disk .drv file. */
+    bool useDerivation;
+
+    /* The path of the derivation. */
+    StorePath drvPath;
+
+    /* The specific outputs that we need to build.  Empty means all of
+       them. */
+    StringSet wantedOutputs;
+
+    /* Whether additional wanted outputs have been added. */
+    bool needRestart = false;
+
+    /* Whether to retry substituting the outputs after building the
+       inputs. */
+    bool retrySubstitution;
+
+    /* The derivation stored at drvPath. */
+    std::unique_ptr<BasicDerivation> drv;
+
+    std::unique_ptr<ParsedDerivation> parsedDrv;
+
+    /* The remainder is state held during the build. */
+
+    /* Locks on (fixed) output paths. */
+    PathLocks outputLocks;
+
+    /* All input paths (that is, the union of FS closures of the
+       immediate input paths). */
+    StorePathSet inputPaths;
+
+    std::map<std::string, InitialOutput> initialOutputs;
+
+    /* User selected for running the builder. */
+    std::unique_ptr<UserLock> buildUser;
+
+    /* The process ID of the builder. */
+    Pid pid;
+
+    /* The temporary directory. */
+    Path tmpDir;
+
+    /* The path of the temporary directory in the sandbox. */
+    Path tmpDirInSandbox;
+
+    /* File descriptor for the log file. */
+    AutoCloseFD fdLogFile;
+    std::shared_ptr<BufferedSink> logFileSink, logSink;
+
+    /* Number of bytes received from the builder's stdout/stderr. */
+    unsigned long logSize;
+
+    /* The most recent log lines. */
+    std::list<std::string> logTail;
+
+    std::string currentLogLine;
+    size_t currentLogLinePos = 0; // to handle carriage return
+
+    std::string currentHookLine;
+
+    /* Pipe for the builder's standard output/error. */
+    Pipe builderOut;
+
+    /* Pipe for synchronising updates to the builder namespaces. */
+    Pipe userNamespaceSync;
+
+    /* The mount namespace of the builder, used to add additional
+       paths to the sandbox as a result of recursive Nix calls. */
+    AutoCloseFD sandboxMountNamespace;
+
+    /* On Linux, whether we're doing the build in its own user
+       namespace. */
+    bool usingUserNamespace = true;
+
+    /* The build hook. */
+    std::unique_ptr<HookInstance> hook;
+
+    /* Whether we're currently doing a chroot build. */
+    bool useChroot = false;
+
+    Path chrootRootDir;
+
+    /* RAII object to delete the chroot directory. */
+    std::shared_ptr<AutoDelete> autoDelChroot;
+
+    /* The sort of derivation we are building. */
+    DerivationType derivationType;
+
+    /* Whether to run the build in a private network namespace. */
+    bool privateNetwork = false;
+
+    typedef void (DerivationGoal::*GoalState)();
+    GoalState state;
+
+    /* Stuff we need to pass to initChild(). */
+    struct ChrootPath {
+        Path source;
+        bool optional;
+        ChrootPath(Path source = "", bool optional = false)
+            : source(source), optional(optional)
+        { }
+    };
+    typedef map<Path, ChrootPath> DirsInChroot; // maps target path to source path
+    DirsInChroot dirsInChroot;
+
+    typedef map<string, string> Environment;
+    Environment env;
+
+#if __APPLE__
+    typedef string SandboxProfile;
+    SandboxProfile additionalSandboxProfile;
+#endif
+
+    /* Hash rewriting. */
+    StringMap inputRewrites, outputRewrites;
+    typedef map<StorePath, StorePath> RedirectedOutputs;
+    RedirectedOutputs redirectedOutputs;
+
+    /* The output paths used during the build.
+
+       - Input-addressed derivations or fixed content-addressed outputs are
+         sometimes built when some of their outputs already exist, and can not
+         be hidden via sandboxing. We use temporary locations instead and
+         rewrite after the build. Otherwise the regular predetermined paths are
+         put here.
+
+       - Floating content-addressed derivations do not know their final build
+         output paths until the outputs are hashed, so random locations are
+         used, and then renamed. The randomness helps guard against hidden
+         self-references.
+     */
+    OutputPathMap scratchOutputs;
+
+    /* The final output paths of the build.
+
+       - For input-addressed derivations, always the precomputed paths
+
+       - For content-addressed derivations, calculated from whatever the hash
+         ends up being. (Note that fixed-output derivations that produce the
+         "wrong" output still install that data under its true content-address.)
+     */
+    OutputPathMap finalOutputs;
+
+    BuildMode buildMode;
+
+    /* If we're repairing without a chroot, there may be outputs that
+       are valid but corrupt.  So we redirect these outputs to
+       temporary paths. */
+    StorePathSet redirectedBadOutputs;
+
+    BuildResult result;
+
+    /* The current round, if we're building multiple times. */
+    size_t curRound = 1;
+
+    size_t nrRounds;
+
+    /* Path registration info from the previous round, if we're
+       building multiple times.  Since this contains the hash, it
+       allows us to compare whether two rounds produced the same
+       result. */
+    std::map<Path, ValidPathInfo> prevInfos;
+
+    uid_t sandboxUid() { return usingUserNamespace ? 1000 : buildUser->getUID(); }
+    gid_t sandboxGid() { return usingUserNamespace ?  100 : buildUser->getGID(); }
+
+    const static Path homeDir;
+
+    std::unique_ptr<MaintainCount<uint64_t>> mcExpectedBuilds, mcRunningBuilds;
+
+    std::unique_ptr<Activity> act;
+
+    /* Activity that denotes waiting for a lock. */
+    std::unique_ptr<Activity> actLock;
+
+    std::map<ActivityId, Activity> builderActivities;
+
+    /* The remote machine on which we're building. */
+    std::string machineName;
+
+    /* The recursive Nix daemon socket. */
+    AutoCloseFD daemonSocket;
+
+    /* The daemon main thread. */
+    std::thread daemonThread;
+
+    /* The daemon worker threads. */
+    std::vector<std::thread> daemonWorkerThreads;
+
+    /* Paths that were added via recursive Nix calls. */
+    StorePathSet addedPaths;
+
+    /* Recursive Nix calls are only allowed to build or realize paths
+       in the original input closure or added via a recursive Nix call
+       (so e.g. you can't do 'nix-store -r /nix/store/<bla>' where
+       /nix/store/<bla> is some arbitrary path in a binary cache). */
+    bool isAllowed(const StorePath & path)
+    {
+        return inputPaths.count(path) || addedPaths.count(path);
+    }
+
+    friend struct RestrictedStore;
+
+    DerivationGoal(const StorePath & drvPath,
+        const StringSet & wantedOutputs, Worker & worker,
+        BuildMode buildMode = bmNormal);
+    DerivationGoal(const StorePath & drvPath, const BasicDerivation & drv,
+        const StringSet & wantedOutputs, Worker & worker,
+        BuildMode buildMode = bmNormal);
+    ~DerivationGoal();
+
+    /* Whether we need to perform hash rewriting if there are valid output paths. */
+    bool needsHashRewrite();
+
+    void timedOut(Error && ex) override;
+
+    string key() override;
+
+    void work() override;
+
+    /* Add wanted outputs to an already existing derivation goal. */
+    void addWantedOutputs(const StringSet & outputs);
+
+    BuildResult getResult() { return result; }
+
+    /* The states. */
+    void getDerivation();
+    void loadDerivation();
+    void haveDerivation();
+    void outputsSubstitutionTried();
+    void gaveUpOnSubstitution();
+    void closureRepaired();
+    void inputsRealised();
+    void tryToBuild();
+    void tryLocalBuild();
+    void buildDone();
+
+    void resolvedFinished();
+
+    /* Is the build hook willing to perform the build? */
+    HookReply tryBuildHook();
+
+    /* Start building a derivation. */
+    void startBuilder();
+
+    /* Fill in the environment for the builder. */
+    void initEnv();
+
+    /* Set up the tmp dir location. */
+    void initTmpDir();
+
+    /* Write a JSON file containing the derivation attributes. */
+    void writeStructuredAttrs();
+
+    void startDaemon();
+
+    void stopDaemon();
+
+    /* Add 'path' to the set of paths that may be referenced by the
+       outputs, and make it appear in the sandbox. */
+    void addDependency(const StorePath & path);
+
+    /* Make a file owned by the builder. */
+    void chownToBuilder(const Path & path);
+
+    /* Run the builder's process. */
+    void runChild();
+
+    /* Check that the derivation outputs all exist and register them
+       as valid. */
+    void registerOutputs();
+
+    /* Check that an output meets the requirements specified by the
+       'outputChecks' attribute (or the legacy
+       '{allowed,disallowed}{References,Requisites}' attributes). */
+    void checkOutputs(const std::map<std::string, ValidPathInfo> & outputs);
+
+    /* Open a log file and a pipe to it. */
+    Path openLogFile();
+
+    /* Close the log file. */
+    void closeLogFile();
+
+    /* Delete the temporary directory, if we have one. */
+    void deleteTmpDir(bool force);
+
+    /* Callback used by the worker to write to the log. */
+    void handleChildOutput(int fd, const string & data) override;
+    void handleEOF(int fd) override;
+    void flushLine();
+
+    /* Wrappers around the corresponding Store methods that first consult the
+       derivation.  This is currently needed because when there is no drv file
+       there also is no DB entry. */
+    std::map<std::string, std::optional<StorePath>> queryPartialDerivationOutputMap();
+    OutputPathMap queryDerivationOutputMap();
+
+    /* Return the set of (in)valid paths. */
+    void checkPathValidity();
+
+    /* Forcibly kill the child process, if any. */
+    void killChild();
+
+    /* Create alternative path calculated from but distinct from the
+       input, so we can avoid overwriting outputs (or other store paths)
+       that already exist. */
+    StorePath makeFallbackPath(const StorePath & path);
+    /* Make a path to another based on the output name along with the
+       derivation hash. */
+    /* FIXME add option to randomize, so we can audit whether our
+       rewrites caught everything */
+    StorePath makeFallbackPath(std::string_view outputName);
+
+    void repairClosure();
+
+    void started();
+
+    void done(
+        BuildResult::Status status,
+        std::optional<Error> ex = {});
+
+    StorePathSet exportReferences(const StorePathSet & storePaths);
+};
+
+}

src/libstore/build/goal.cc (new file, 89 lines)
@@ -0,0 +1,89 @@
+#include "goal.hh"
+#include "worker.hh"
+
+namespace nix {
+
+
+bool CompareGoalPtrs::operator() (const GoalPtr & a, const GoalPtr & b) const {
+    string s1 = a->key();
+    string s2 = b->key();
+    return s1 < s2;
+}
+
+
+void addToWeakGoals(WeakGoals & goals, GoalPtr p)
+{
+    // FIXME: necessary?
+    // FIXME: O(n)
+    for (auto & i : goals)
+        if (i.lock() == p) return;
+    goals.push_back(p);
+}
+
+
+void Goal::addWaitee(GoalPtr waitee)
+{
+    waitees.insert(waitee);
+    addToWeakGoals(waitee->waiters, shared_from_this());
+}
+
+
+void Goal::waiteeDone(GoalPtr waitee, ExitCode result)
+{
+    assert(waitees.find(waitee) != waitees.end());
+    waitees.erase(waitee);
+
+    trace(fmt("waitee '%s' done; %d left", waitee->name, waitees.size()));
+
+    if (result == ecFailed || result == ecNoSubstituters || result == ecIncompleteClosure) ++nrFailed;
+
+    if (result == ecNoSubstituters) ++nrNoSubstituters;
+
+    if (result == ecIncompleteClosure) ++nrIncompleteClosure;
+
+    if (waitees.empty() || (result == ecFailed && !settings.keepGoing)) {
+
+        /* If we failed and keepGoing is not set, we remove all
+           remaining waitees. */
+        for (auto & goal : waitees) {
+            WeakGoals waiters2;
+            for (auto & j : goal->waiters)
+                if (j.lock() != shared_from_this()) waiters2.push_back(j);
+            goal->waiters = waiters2;
+        }
+        waitees.clear();
+
+        worker.wakeUp(shared_from_this());
+    }
+}
+
+
+void Goal::amDone(ExitCode result, std::optional<Error> ex)
+{
+    trace("done");
+    assert(exitCode == ecBusy);
+    assert(result == ecSuccess || result == ecFailed || result == ecNoSubstituters || result == ecIncompleteClosure);
+    exitCode = result;
+
+    if (ex) {
+        if (!waiters.empty())
+            logError(ex->info());
+        else
+            this->ex = std::move(*ex);
+    }
+
+    for (auto & i : waiters) {
+        GoalPtr goal = i.lock();
+        if (goal) goal->waiteeDone(shared_from_this(), result);
+    }
+    waiters.clear();
+    worker.removeGoal(shared_from_this());
+}
+
+
+void Goal::trace(const FormatOrString & fs)
+{
+    debug("%1%: %2%", name, fs.s);
+}
+
+}
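waiteeDone() is the heart of the scheduler's dependency bookkeeping: each goal tracks the set of goals it waits on, counts failures by kind, and asks the worker to wake it once the set empties (or immediately on a failure without keepGoing). The same counting logic in isolation, reduced to a toy:

    #include <iostream>
    #include <memory>
    #include <set>

    // Toy model: a node becomes runnable once all its dependencies report in.
    struct Node {
        std::set<std::shared_ptr<Node>> waitees;
        int nrFailed = 0;

        void waiteeDone(const std::shared_ptr<Node> & waitee, bool ok)
        {
            waitees.erase(waitee);
            if (!ok) ++nrFailed;
            if (waitees.empty())
                std::cout << "wake up (" << nrFailed << " dependencies failed)\n";
        }
    };

    int main()
    {
        auto a = std::make_shared<Node>();
        auto b = std::make_shared<Node>();
        auto c = std::make_shared<Node>();
        a->waitees = {b, c};
        a->waiteeDone(b, true);
        a->waiteeDone(c, false);  // last waitee done -> a is woken
    }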

src/libstore/build/goal.hh (new file, 107 lines)
@@ -0,0 +1,107 @@
+#pragma once
+
+#include "types.hh"
+#include "store-api.hh"
+
+namespace nix {
+
+/* Forward definition. */
+struct Goal;
+class Worker;
+
+/* A pointer to a goal. */
+typedef std::shared_ptr<Goal> GoalPtr;
+typedef std::weak_ptr<Goal> WeakGoalPtr;
+
+struct CompareGoalPtrs {
+    bool operator() (const GoalPtr & a, const GoalPtr & b) const;
+};
+
+/* Set of goals. */
+typedef set<GoalPtr, CompareGoalPtrs> Goals;
+typedef list<WeakGoalPtr> WeakGoals;
+
+/* A map of paths to goals (and the other way around). */
+typedef std::map<StorePath, WeakGoalPtr> WeakGoalMap;
+
+struct Goal : public std::enable_shared_from_this<Goal>
+{
+    typedef enum {ecBusy, ecSuccess, ecFailed, ecNoSubstituters, ecIncompleteClosure} ExitCode;
+
+    /* Backlink to the worker. */
+    Worker & worker;
+
+    /* Goals that this goal is waiting for. */
+    Goals waitees;
+
+    /* Goals waiting for this one to finish.  Must use weak pointers
+       here to prevent cycles. */
+    WeakGoals waiters;
+
+    /* Number of goals we are/were waiting for that have failed. */
+    unsigned int nrFailed;
+
+    /* Number of substitution goals we are/were waiting for that
+       failed because there are no substituters. */
+    unsigned int nrNoSubstituters;
+
+    /* Number of substitution goals we are/were waiting for that
+       failed because they had unsubstitutable references. */
+    unsigned int nrIncompleteClosure;
+
+    /* Name of this goal for debugging purposes. */
+    string name;
+
+    /* Whether the goal is finished. */
+    ExitCode exitCode;
+
+    /* Exception containing an error message, if any. */
+    std::optional<Error> ex;
+
+    Goal(Worker & worker) : worker(worker)
+    {
+        nrFailed = nrNoSubstituters = nrIncompleteClosure = 0;
+        exitCode = ecBusy;
+    }
+
+    virtual ~Goal()
+    {
+        trace("goal destroyed");
+    }
+
+    virtual void work() = 0;
+
+    void addWaitee(GoalPtr waitee);
+
+    virtual void waiteeDone(GoalPtr waitee, ExitCode result);
+
+    virtual void handleChildOutput(int fd, const string & data)
+    {
+        abort();
+    }
+
+    virtual void handleEOF(int fd)
+    {
+        abort();
+    }
+
+    void trace(const FormatOrString & fs);
+
+    string getName()
+    {
+        return name;
+    }
+
+    /* Callback in case of a timeout.  It should wake up its waiters,
+       get rid of any running child processes that are being monitored
+       by the worker (important!), etc. */
+    virtual void timedOut(Error && ex) = 0;
+
+    virtual string key() = 0;
+
+    void amDone(ExitCode result, std::optional<Error> ex = {});
+};
+
+void addToWeakGoals(WeakGoals & goals, GoalPtr p);
+
+}

src/libstore/build/hook-instance.cc (new file, 72 lines)
@@ -0,0 +1,72 @@
+#include "globals.hh"
+#include "hook-instance.hh"
+
+namespace nix {
+
+HookInstance::HookInstance()
+{
+    debug("starting build hook '%s'", settings.buildHook);
+
+    /* Create a pipe to get the output of the child. */
+    fromHook.create();
+
+    /* Create the communication pipes. */
+    toHook.create();
+
+    /* Create a pipe to get the output of the builder. */
+    builderOut.create();
+
+    /* Fork the hook. */
+    pid = startProcess([&]() {
+
+        commonChildInit(fromHook);
+
+        if (chdir("/") == -1) throw SysError("changing into /");
+
+        /* Dup the communication pipes. */
+        if (dup2(toHook.readSide.get(), STDIN_FILENO) == -1)
+            throw SysError("dupping to-hook read side");
+
+        /* Use fd 4 for the builder's stdout/stderr. */
+        if (dup2(builderOut.writeSide.get(), 4) == -1)
+            throw SysError("dupping builder's stdout/stderr");
+
+        /* Hack: pass the read side of that fd to allow build-remote
+           to read SSH error messages. */
+        if (dup2(builderOut.readSide.get(), 5) == -1)
+            throw SysError("dupping builder's stdout/stderr");
+
+        Strings args = {
+            std::string(baseNameOf(settings.buildHook.get())),
+            std::to_string(verbosity),
+        };
+
+        execv(settings.buildHook.get().c_str(), stringsToCharPtrs(args).data());
+
+        throw SysError("executing '%s'", settings.buildHook);
+    });
+
+    pid.setSeparatePG(true);
+    fromHook.writeSide = -1;
+    toHook.readSide = -1;
+
+    sink = FdSink(toHook.writeSide.get());
+    std::map<std::string, Config::SettingInfo> settings;
+    globalConfig.getSettings(settings);
+    for (auto & setting : settings)
+        sink << 1 << setting.first << setting.second.value;
+    sink << 0;
+}
+
+
+HookInstance::~HookInstance()
+{
+    try {
+        toHook.writeSide = -1;
+        if (pid != -1) pid.kill();
+    } catch (...) {
+        ignoreException();
+    }
+}
+
+}

src/libstore/build/hook-instance.hh (new file, 31 lines)
@@ -0,0 +1,31 @@
+#pragma once
+
+#include "logging.hh"
+#include "serialise.hh"
+
+namespace nix {
+
+struct HookInstance
+{
+    /* Pipes for talking to the build hook. */
+    Pipe toHook;
+
+    /* Pipe for the hook's standard output/error. */
+    Pipe fromHook;
+
+    /* Pipe for the builder's standard output/error. */
+    Pipe builderOut;
+
+    /* The process ID of the hook. */
+    Pid pid;
+
+    FdSink sink;
+
+    std::map<ActivityId, Activity> activities;
+
+    HookInstance();
+
+    ~HookInstance();
+};
+
+}

src/libstore/build/local-store-build.cc (new file, 108 lines)
@ -0,0 +1,108 @@
|
||||||
|
#include "machines.hh"
|
||||||
|
#include "worker.hh"
|
||||||
|
#include "substitution-goal.hh"
|
||||||
|
#include "derivation-goal.hh"
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
void LocalStore::buildPaths(const std::vector<StorePathWithOutputs> & drvPaths, BuildMode buildMode)
|
||||||
|
{
|
||||||
|
Worker worker(*this);
|
||||||
|
|
||||||
|
Goals goals;
|
||||||
|
for (auto & path : drvPaths) {
|
||||||
|
if (path.path.isDerivation())
|
||||||
|
goals.insert(worker.makeDerivationGoal(path.path, path.outputs, buildMode));
|
||||||
|
else
|
||||||
|
goals.insert(worker.makeSubstitutionGoal(path.path, buildMode == bmRepair ? Repair : NoRepair));
|
||||||
|
}
|
||||||
|
|
||||||
|
worker.run(goals);
|
||||||
|
|
||||||
|
StorePathSet failed;
|
||||||
|
std::optional<Error> ex;
|
||||||
|
for (auto & i : goals) {
|
||||||
|
if (i->ex) {
|
||||||
|
if (ex)
|
||||||
|
logError(i->ex->info());
|
||||||
|
else
|
||||||
|
ex = i->ex;
|
||||||
|
}
|
||||||
|
if (i->exitCode != Goal::ecSuccess) {
|
||||||
|
if (auto i2 = dynamic_cast<DerivationGoal *>(i.get())) failed.insert(i2->drvPath);
|
||||||
|
else if (auto i2 = dynamic_cast<SubstitutionGoal *>(i.get())) failed.insert(i2->storePath);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (failed.size() == 1 && ex) {
|
||||||
|
ex->status = worker.exitStatus();
|
||||||
|
throw *ex;
|
||||||
|
} else if (!failed.empty()) {
|
||||||
|
if (ex) logError(ex->info());
|
||||||
|
throw Error(worker.exitStatus(), "build of %s failed", showPaths(failed));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
BuildResult LocalStore::buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
|
||||||
|
BuildMode buildMode)
|
||||||
|
{
|
||||||
|
Worker worker(*this);
|
||||||
|
auto goal = worker.makeBasicDerivationGoal(drvPath, drv, {}, buildMode);
|
||||||
|
|
||||||
|
BuildResult result;
|
||||||
|
|
||||||
|
try {
|
||||||
|
worker.run(Goals{goal});
|
||||||
|
result = goal->getResult();
|
||||||
|
} catch (Error & e) {
|
||||||
|
result.status = BuildResult::MiscFailure;
|
||||||
|
result.errorMsg = e.msg();
|
||||||
|
}
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void LocalStore::ensurePath(const StorePath & path)
|
||||||
|
{
|
||||||
|
/* If the path is already valid, we're done. */
|
||||||
|
if (isValidPath(path)) return;
|
||||||
|
|
||||||
|
Worker worker(*this);
|
||||||
|
GoalPtr goal = worker.makeSubstitutionGoal(path);
|
||||||
|
Goals goals = {goal};
|
||||||
|
|
||||||
|
worker.run(goals);
|
||||||
|
|
||||||
|
if (goal->exitCode != Goal::ecSuccess) {
|
||||||
|
if (goal->ex) {
|
||||||
|
goal->ex->status = worker.exitStatus();
|
||||||
|
throw *goal->ex;
|
||||||
|
} else
|
||||||
|
throw Error(worker.exitStatus(), "path '%s' does not exist and cannot be created", printStorePath(path));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void LocalStore::repairPath(const StorePath & path)
|
||||||
|
{
|
||||||
|
Worker worker(*this);
|
||||||
|
GoalPtr goal = worker.makeSubstitutionGoal(path, Repair);
|
||||||
|
Goals goals = {goal};
|
||||||
|
|
||||||
|
worker.run(goals);
|
||||||
|
|
||||||
|
if (goal->exitCode != Goal::ecSuccess) {
|
||||||
|
/* Since substituting the path didn't work, if we have a valid
|
||||||
|
deriver, then rebuild the deriver. */
|
||||||
|
auto info = queryPathInfo(path);
|
||||||
|
if (info->deriver && isValidPath(*info->deriver)) {
|
||||||
|
goals.clear();
|
||||||
|
goals.insert(worker.makeDerivationGoal(*info->deriver, StringSet(), bmRepair));
|
||||||
|
worker.run(goals);
|
||||||
|
} else
|
||||||
|
throw Error(worker.exitStatus(), "cannot repair path '%s'", printStorePath(path));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
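A minimal sketch of how a client might drive these entry points; the `buildOne` helper and its bootstrap are assumptions for illustration, not part of this commit:

// Hypothetical driver: realise one derivation output through
// LocalStore::buildPaths() and let failures propagate as errors that
// carry Worker::exitStatus().
#include "local-store.hh"

void buildOne(nix::LocalStore & store, const nix::StorePath & drvPath)
{
    std::vector<nix::StorePathWithOutputs> paths;
    paths.push_back({drvPath, {"out"}});  // request only the "out" output
    store.buildPaths(paths, nix::bmNormal);
}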
296 src/libstore/build/substitution-goal.cc Normal file

@@ -0,0 +1,296 @@
#include "worker.hh"
|
||||||
|
#include "substitution-goal.hh"
|
||||||
|
#include "nar-info.hh"
|
||||||
|
#include "finally.hh"
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
SubstitutionGoal::SubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair, std::optional<ContentAddress> ca)
|
||||||
|
: Goal(worker)
|
||||||
|
, storePath(storePath)
|
||||||
|
, repair(repair)
|
||||||
|
, ca(ca)
|
||||||
|
{
|
||||||
|
state = &SubstitutionGoal::init;
|
||||||
|
name = fmt("substitution of '%s'", worker.store.printStorePath(this->storePath));
|
||||||
|
trace("created");
|
||||||
|
maintainExpectedSubstitutions = std::make_unique<MaintainCount<uint64_t>>(worker.expectedSubstitutions);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
SubstitutionGoal::~SubstitutionGoal()
|
||||||
|
{
|
||||||
|
try {
|
||||||
|
if (thr.joinable()) {
|
||||||
|
// FIXME: signal worker thread to quit.
|
||||||
|
thr.join();
|
||||||
|
worker.childTerminated(this);
|
||||||
|
}
|
||||||
|
} catch (...) {
|
||||||
|
ignoreException();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void SubstitutionGoal::work()
|
||||||
|
{
|
||||||
|
(this->*state)();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void SubstitutionGoal::init()
|
||||||
|
{
|
||||||
|
trace("init");
|
||||||
|
|
||||||
|
worker.store.addTempRoot(storePath);
|
||||||
|
|
||||||
|
/* If the path already exists we're done. */
|
||||||
|
if (!repair && worker.store.isValidPath(storePath)) {
|
||||||
|
amDone(ecSuccess);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (settings.readOnlyMode)
|
||||||
|
throw Error("cannot substitute path '%s' - no write access to the Nix store", worker.store.printStorePath(storePath));
|
||||||
|
|
||||||
|
subs = settings.useSubstitutes ? getDefaultSubstituters() : std::list<ref<Store>>();
|
||||||
|
|
||||||
|
tryNext();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void SubstitutionGoal::tryNext()
|
||||||
|
{
|
||||||
|
trace("trying next substituter");
|
||||||
|
|
||||||
|
if (subs.size() == 0) {
|
||||||
|
/* None left. Terminate this goal and let someone else deal
|
||||||
|
with it. */
|
||||||
|
debug("path '%s' is required, but there is no substituter that can build it", worker.store.printStorePath(storePath));
|
||||||
|
|
||||||
|
/* Hack: don't indicate failure if there were no substituters.
|
||||||
|
In that case the calling derivation should just do a
|
||||||
|
build. */
|
||||||
|
amDone(substituterFailed ? ecFailed : ecNoSubstituters);
|
||||||
|
|
||||||
|
if (substituterFailed) {
|
||||||
|
worker.failedSubstitutions++;
|
||||||
|
worker.updateProgress();
|
||||||
|
}
|
||||||
|
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
sub = subs.front();
|
||||||
|
subs.pop_front();
|
||||||
|
|
||||||
|
if (ca) {
|
||||||
|
subPath = sub->makeFixedOutputPathFromCA(storePath.name(), *ca);
|
||||||
|
if (sub->storeDir == worker.store.storeDir)
|
||||||
|
assert(subPath == storePath);
|
||||||
|
} else if (sub->storeDir != worker.store.storeDir) {
|
||||||
|
tryNext();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
// FIXME: make async
|
||||||
|
info = sub->queryPathInfo(subPath ? *subPath : storePath);
|
||||||
|
} catch (InvalidPath &) {
|
||||||
|
tryNext();
|
||||||
|
return;
|
||||||
|
} catch (SubstituterDisabled &) {
|
||||||
|
if (settings.tryFallback) {
|
||||||
|
tryNext();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
throw;
|
||||||
|
} catch (Error & e) {
|
||||||
|
if (settings.tryFallback) {
|
||||||
|
logError(e.info());
|
||||||
|
tryNext();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
throw;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (info->path != storePath) {
|
||||||
|
if (info->isContentAddressed(*sub) && info->references.empty()) {
|
||||||
|
auto info2 = std::make_shared<ValidPathInfo>(*info);
|
||||||
|
info2->path = storePath;
|
||||||
|
info = info2;
|
||||||
|
} else {
|
||||||
|
printError("asked '%s' for '%s' but got '%s'",
|
||||||
|
sub->getUri(), worker.store.printStorePath(storePath), sub->printStorePath(info->path));
|
||||||
|
tryNext();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Update the total expected download size. */
|
||||||
|
auto narInfo = std::dynamic_pointer_cast<const NarInfo>(info);
|
||||||
|
|
||||||
|
maintainExpectedNar = std::make_unique<MaintainCount<uint64_t>>(worker.expectedNarSize, info->narSize);
|
||||||
|
|
||||||
|
maintainExpectedDownload =
|
||||||
|
narInfo && narInfo->fileSize
|
||||||
|
? std::make_unique<MaintainCount<uint64_t>>(worker.expectedDownloadSize, narInfo->fileSize)
|
||||||
|
: nullptr;
|
||||||
|
|
||||||
|
worker.updateProgress();
|
||||||
|
|
||||||
|
/* Bail out early if this substituter lacks a valid
|
||||||
|
signature. LocalStore::addToStore() also checks for this, but
|
||||||
|
only after we've downloaded the path. */
|
||||||
|
if (worker.store.requireSigs
|
||||||
|
&& !sub->isTrusted
|
||||||
|
&& !info->checkSignatures(worker.store, worker.store.getPublicKeys()))
|
||||||
|
{
|
||||||
|
logWarning({
|
||||||
|
.name = "Invalid path signature",
|
||||||
|
.hint = hintfmt("substituter '%s' does not have a valid signature for path '%s'",
|
||||||
|
sub->getUri(), worker.store.printStorePath(storePath))
|
||||||
|
});
|
||||||
|
tryNext();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* To maintain the closure invariant, we first have to realise the
|
||||||
|
paths referenced by this one. */
|
||||||
|
for (auto & i : info->references)
|
||||||
|
if (i != storePath) /* ignore self-references */
|
||||||
|
addWaitee(worker.makeSubstitutionGoal(i));
|
||||||
|
|
||||||
|
if (waitees.empty()) /* to prevent hang (no wake-up event) */
|
||||||
|
referencesValid();
|
||||||
|
else
|
||||||
|
state = &SubstitutionGoal::referencesValid;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void SubstitutionGoal::referencesValid()
|
||||||
|
{
|
||||||
|
trace("all references realised");
|
||||||
|
|
||||||
|
if (nrFailed > 0) {
|
||||||
|
debug("some references of path '%s' could not be realised", worker.store.printStorePath(storePath));
|
||||||
|
amDone(nrNoSubstituters > 0 || nrIncompleteClosure > 0 ? ecIncompleteClosure : ecFailed);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (auto & i : info->references)
|
||||||
|
if (i != storePath) /* ignore self-references */
|
||||||
|
assert(worker.store.isValidPath(i));
|
||||||
|
|
||||||
|
state = &SubstitutionGoal::tryToRun;
|
||||||
|
worker.wakeUp(shared_from_this());
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void SubstitutionGoal::tryToRun()
|
||||||
|
{
|
||||||
|
trace("trying to run");
|
||||||
|
|
||||||
|
/* Make sure that we are allowed to start a build. Note that even
|
||||||
|
if maxBuildJobs == 0 (no local builds allowed), we still allow
|
||||||
|
a substituter to run. This is because substitutions cannot be
|
||||||
|
distributed to another machine via the build hook. */
|
||||||
|
if (worker.getNrLocalBuilds() >= std::max(1U, (unsigned int) settings.maxBuildJobs)) {
|
||||||
|
worker.waitForBuildSlot(shared_from_this());
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
maintainRunningSubstitutions = std::make_unique<MaintainCount<uint64_t>>(worker.runningSubstitutions);
|
||||||
|
worker.updateProgress();
|
||||||
|
|
||||||
|
outPipe.create();
|
||||||
|
|
||||||
|
promise = std::promise<void>();
|
||||||
|
|
||||||
|
thr = std::thread([this]() {
|
||||||
|
try {
|
||||||
|
/* Wake up the worker loop when we're done. */
|
||||||
|
Finally updateStats([this]() { outPipe.writeSide = -1; });
|
||||||
|
|
||||||
|
Activity act(*logger, actSubstitute, Logger::Fields{worker.store.printStorePath(storePath), sub->getUri()});
|
||||||
|
PushActivity pact(act.id);
|
||||||
|
|
||||||
|
copyStorePath(ref<Store>(sub), ref<Store>(worker.store.shared_from_this()),
|
||||||
|
subPath ? *subPath : storePath, repair, sub->isTrusted ? NoCheckSigs : CheckSigs);
|
||||||
|
|
||||||
|
promise.set_value();
|
||||||
|
} catch (...) {
|
||||||
|
promise.set_exception(std::current_exception());
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
worker.childStarted(shared_from_this(), {outPipe.readSide.get()}, true, false);
|
||||||
|
|
||||||
|
state = &SubstitutionGoal::finished;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void SubstitutionGoal::finished()
|
||||||
|
{
|
||||||
|
trace("substitute finished");
|
||||||
|
|
||||||
|
thr.join();
|
||||||
|
worker.childTerminated(this);
|
||||||
|
|
||||||
|
try {
|
||||||
|
promise.get_future().get();
|
||||||
|
} catch (std::exception & e) {
|
||||||
|
printError(e.what());
|
||||||
|
|
||||||
|
/* Cause the parent build to fail unless --fallback is given,
|
||||||
|
or the substitute has disappeared. The latter case behaves
|
||||||
|
the same as the substitute never having existed in the
|
||||||
|
first place. */
|
||||||
|
try {
|
||||||
|
throw;
|
||||||
|
} catch (SubstituteGone &) {
|
||||||
|
} catch (...) {
|
||||||
|
substituterFailed = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Try the next substitute. */
|
||||||
|
state = &SubstitutionGoal::tryNext;
|
||||||
|
worker.wakeUp(shared_from_this());
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
worker.markContentsGood(storePath);
|
||||||
|
|
||||||
|
printMsg(lvlChatty, "substitution of path '%s' succeeded", worker.store.printStorePath(storePath));
|
||||||
|
|
||||||
|
maintainRunningSubstitutions.reset();
|
||||||
|
|
||||||
|
maintainExpectedSubstitutions.reset();
|
||||||
|
worker.doneSubstitutions++;
|
||||||
|
|
||||||
|
if (maintainExpectedDownload) {
|
||||||
|
auto fileSize = maintainExpectedDownload->delta;
|
||||||
|
maintainExpectedDownload.reset();
|
||||||
|
worker.doneDownloadSize += fileSize;
|
||||||
|
}
|
||||||
|
|
||||||
|
worker.doneNarSize += maintainExpectedNar->delta;
|
||||||
|
maintainExpectedNar.reset();
|
||||||
|
|
||||||
|
worker.updateProgress();
|
||||||
|
|
||||||
|
amDone(ecSuccess);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void SubstitutionGoal::handleChildOutput(int fd, const string & data)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void SubstitutionGoal::handleEOF(int fd)
|
||||||
|
{
|
||||||
|
if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this());
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
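The goal's control flow above is a pointer-to-member-function state machine: `work()` simply invokes whatever `state` currently points at, and each state method sets the next state before returning. A self-contained sketch of the same pattern (names are illustrative, not from this commit):

// Illustrative reduction of the GoalState dispatch used above.
#include <cstdio>

struct MiniGoal {
    typedef void (MiniGoal::*State)();
    State state = &MiniGoal::init;

    void work() { (this->*state)(); }   // dispatch to the current state

    void init()     { std::printf("init\n");     state = &MiniGoal::finished; }
    void finished() { std::printf("finished\n"); }
};

int main() {
    MiniGoal g;
    g.work();  // prints "init" and advances the state
    g.work();  // prints "finished"
}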
83 src/libstore/build/substitution-goal.hh Normal file

@@ -0,0 +1,83 @@
#pragma once

#include "lock.hh"
#include "store-api.hh"
#include "goal.hh"

namespace nix {

class Worker;

struct SubstitutionGoal : public Goal
{
    /* The store path that should be realised through a substitute. */
    StorePath storePath;

    /* The path the substituter refers to the path as. This will be
     * different when the stores have different names. */
    std::optional<StorePath> subPath;

    /* The remaining substituters. */
    std::list<ref<Store>> subs;

    /* The current substituter. */
    std::shared_ptr<Store> sub;

    /* Whether a substituter failed. */
    bool substituterFailed = false;

    /* Path info returned by the substituter's query info operation. */
    std::shared_ptr<const ValidPathInfo> info;

    /* Pipe for the substituter's standard output. */
    Pipe outPipe;

    /* The substituter thread. */
    std::thread thr;

    std::promise<void> promise;

    /* Whether to try to repair a valid path. */
    RepairFlag repair;

    /* Location where we're downloading the substitute.  Differs from
       storePath when doing a repair. */
    Path destPath;

    std::unique_ptr<MaintainCount<uint64_t>> maintainExpectedSubstitutions,
        maintainRunningSubstitutions, maintainExpectedNar, maintainExpectedDownload;

    typedef void (SubstitutionGoal::*GoalState)();
    GoalState state;

    /* Content address for recomputing store path */
    std::optional<ContentAddress> ca;

    SubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
    ~SubstitutionGoal();

    void timedOut(Error && ex) override { abort(); };

    string key() override
    {
        /* "a$" ensures substitution goals happen before derivation
           goals. */
        return "a$" + std::string(storePath.name()) + "$" + worker.store.printStorePath(storePath);
    }

    void work() override;

    /* The states. */
    void init();
    void tryNext();
    void gotInfo();
    void referencesValid();
    void tryToRun();
    void finished();

    /* Callback used by the worker to write to the log. */
    void handleChildOutput(int fd, const string & data) override;
    void handleEOF(int fd) override;
};

}
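The `"a$"` prefix in `key()` works because goals are ordered by comparing their key strings, so any key with a later-sorting prefix runs later. A small sketch of that ordering (the `"b$"` derivation-goal prefix and the store paths below are assumptions for illustration, not shown in this diff):

// Lexicographic comparison puts "a$..." substitution keys first.
#include <cassert>
#include <string>

int main() {
    std::string subKey = "a$hello$/nix/store/abc-hello";      // illustrative
    std::string drvKey = "b$hello$/nix/store/def-hello.drv";  // illustrative
    assert(subKey < drvKey);  // substitution goals are scheduled before builds
}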
475 src/libstore/build/worker.cc Normal file

@@ -0,0 +1,475 @@
#include "machines.hh"
|
||||||
|
#include "worker.hh"
|
||||||
|
#include "substitution-goal.hh"
|
||||||
|
#include "derivation-goal.hh"
|
||||||
|
#include "hook-instance.hh"
|
||||||
|
|
||||||
|
#include <poll.h>
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
Worker::Worker(LocalStore & store)
|
||||||
|
: act(*logger, actRealise)
|
||||||
|
, actDerivations(*logger, actBuilds)
|
||||||
|
, actSubstitutions(*logger, actCopyPaths)
|
||||||
|
, store(store)
|
||||||
|
{
|
||||||
|
/* Debugging: prevent recursive workers. */
|
||||||
|
nrLocalBuilds = 0;
|
||||||
|
lastWokenUp = steady_time_point::min();
|
||||||
|
permanentFailure = false;
|
||||||
|
timedOut = false;
|
||||||
|
hashMismatch = false;
|
||||||
|
checkMismatch = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Worker::~Worker()
|
||||||
|
{
|
||||||
|
/* Explicitly get rid of all strong pointers now. After this all
|
||||||
|
goals that refer to this worker should be gone. (Otherwise we
|
||||||
|
are in trouble, since goals may call childTerminated() etc. in
|
||||||
|
their destructors). */
|
||||||
|
topGoals.clear();
|
||||||
|
|
||||||
|
assert(expectedSubstitutions == 0);
|
||||||
|
assert(expectedDownloadSize == 0);
|
||||||
|
assert(expectedNarSize == 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
std::shared_ptr<DerivationGoal> Worker::makeDerivationGoalCommon(
|
||||||
|
const StorePath & drvPath,
|
||||||
|
const StringSet & wantedOutputs,
|
||||||
|
std::function<std::shared_ptr<DerivationGoal>()> mkDrvGoal)
|
||||||
|
{
|
||||||
|
std::weak_ptr<DerivationGoal> & goal_weak = derivationGoals[drvPath];
|
||||||
|
std::shared_ptr<DerivationGoal> goal = goal_weak.lock();
|
||||||
|
if (!goal) {
|
||||||
|
goal = mkDrvGoal();
|
||||||
|
goal_weak = goal;
|
||||||
|
wakeUp(goal);
|
||||||
|
} else {
|
||||||
|
goal->addWantedOutputs(wantedOutputs);
|
||||||
|
}
|
||||||
|
return goal;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
std::shared_ptr<DerivationGoal> Worker::makeDerivationGoal(const StorePath & drvPath,
|
||||||
|
const StringSet & wantedOutputs, BuildMode buildMode)
|
||||||
|
{
|
||||||
|
return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() {
|
||||||
|
return std::make_shared<DerivationGoal>(drvPath, wantedOutputs, *this, buildMode);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
std::shared_ptr<DerivationGoal> Worker::makeBasicDerivationGoal(const StorePath & drvPath,
|
||||||
|
const BasicDerivation & drv, const StringSet & wantedOutputs, BuildMode buildMode)
|
||||||
|
{
|
||||||
|
return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() {
|
||||||
|
return std::make_shared<DerivationGoal>(drvPath, drv, wantedOutputs, *this, buildMode);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
std::shared_ptr<SubstitutionGoal> Worker::makeSubstitutionGoal(const StorePath & path, RepairFlag repair, std::optional<ContentAddress> ca)
|
||||||
|
{
|
||||||
|
std::weak_ptr<SubstitutionGoal> & goal_weak = substitutionGoals[path];
|
||||||
|
auto goal = goal_weak.lock(); // FIXME
|
||||||
|
if (!goal) {
|
||||||
|
goal = std::make_shared<SubstitutionGoal>(path, *this, repair, ca);
|
||||||
|
goal_weak = goal;
|
||||||
|
wakeUp(goal);
|
||||||
|
}
|
||||||
|
return goal;
|
||||||
|
}
|
||||||
|
|
||||||
|
template<typename G>
|
||||||
|
static void removeGoal(std::shared_ptr<G> goal, std::map<StorePath, std::weak_ptr<G>> & goalMap)
|
||||||
|
{
|
||||||
|
/* !!! inefficient */
|
||||||
|
for (auto i = goalMap.begin();
|
||||||
|
i != goalMap.end(); )
|
||||||
|
if (i->second.lock() == goal) {
|
||||||
|
auto j = i; ++j;
|
||||||
|
goalMap.erase(i);
|
||||||
|
i = j;
|
||||||
|
}
|
||||||
|
else ++i;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void Worker::removeGoal(GoalPtr goal)
|
||||||
|
{
|
||||||
|
if (auto drvGoal = std::dynamic_pointer_cast<DerivationGoal>(goal))
|
||||||
|
nix::removeGoal(drvGoal, derivationGoals);
|
||||||
|
else if (auto subGoal = std::dynamic_pointer_cast<SubstitutionGoal>(goal))
|
||||||
|
nix::removeGoal(subGoal, substitutionGoals);
|
||||||
|
else
|
||||||
|
assert(false);
|
||||||
|
if (topGoals.find(goal) != topGoals.end()) {
|
||||||
|
topGoals.erase(goal);
|
||||||
|
/* If a top-level goal failed, then kill all other goals
|
||||||
|
(unless keepGoing was set). */
|
||||||
|
if (goal->exitCode == Goal::ecFailed && !settings.keepGoing)
|
||||||
|
topGoals.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Wake up goals waiting for any goal to finish. */
|
||||||
|
for (auto & i : waitingForAnyGoal) {
|
||||||
|
GoalPtr goal = i.lock();
|
||||||
|
if (goal) wakeUp(goal);
|
||||||
|
}
|
||||||
|
|
||||||
|
waitingForAnyGoal.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void Worker::wakeUp(GoalPtr goal)
|
||||||
|
{
|
||||||
|
goal->trace("woken up");
|
||||||
|
addToWeakGoals(awake, goal);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
unsigned Worker::getNrLocalBuilds()
|
||||||
|
{
|
||||||
|
return nrLocalBuilds;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void Worker::childStarted(GoalPtr goal, const set<int> & fds,
|
||||||
|
bool inBuildSlot, bool respectTimeouts)
|
||||||
|
{
|
||||||
|
Child child;
|
||||||
|
child.goal = goal;
|
||||||
|
child.goal2 = goal.get();
|
||||||
|
child.fds = fds;
|
||||||
|
child.timeStarted = child.lastOutput = steady_time_point::clock::now();
|
||||||
|
child.inBuildSlot = inBuildSlot;
|
||||||
|
child.respectTimeouts = respectTimeouts;
|
||||||
|
children.emplace_back(child);
|
||||||
|
if (inBuildSlot) nrLocalBuilds++;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void Worker::childTerminated(Goal * goal, bool wakeSleepers)
|
||||||
|
{
|
||||||
|
auto i = std::find_if(children.begin(), children.end(),
|
||||||
|
[&](const Child & child) { return child.goal2 == goal; });
|
||||||
|
if (i == children.end()) return;
|
||||||
|
|
||||||
|
if (i->inBuildSlot) {
|
||||||
|
assert(nrLocalBuilds > 0);
|
||||||
|
nrLocalBuilds--;
|
||||||
|
}
|
||||||
|
|
||||||
|
children.erase(i);
|
||||||
|
|
||||||
|
if (wakeSleepers) {
|
||||||
|
|
||||||
|
/* Wake up goals waiting for a build slot. */
|
||||||
|
for (auto & j : wantingToBuild) {
|
||||||
|
GoalPtr goal = j.lock();
|
||||||
|
if (goal) wakeUp(goal);
|
||||||
|
}
|
||||||
|
|
||||||
|
wantingToBuild.clear();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void Worker::waitForBuildSlot(GoalPtr goal)
|
||||||
|
{
|
||||||
|
debug("wait for build slot");
|
||||||
|
if (getNrLocalBuilds() < settings.maxBuildJobs)
|
||||||
|
wakeUp(goal); /* we can do it right away */
|
||||||
|
else
|
||||||
|
addToWeakGoals(wantingToBuild, goal);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void Worker::waitForAnyGoal(GoalPtr goal)
|
||||||
|
{
|
||||||
|
debug("wait for any goal");
|
||||||
|
addToWeakGoals(waitingForAnyGoal, goal);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void Worker::waitForAWhile(GoalPtr goal)
|
||||||
|
{
|
||||||
|
debug("wait for a while");
|
||||||
|
addToWeakGoals(waitingForAWhile, goal);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void Worker::run(const Goals & _topGoals)
|
||||||
|
{
|
||||||
|
std::vector<nix::StorePathWithOutputs> topPaths;
|
||||||
|
|
||||||
|
for (auto & i : _topGoals) {
|
||||||
|
topGoals.insert(i);
|
||||||
|
if (auto goal = dynamic_cast<DerivationGoal *>(i.get())) {
|
||||||
|
topPaths.push_back({goal->drvPath, goal->wantedOutputs});
|
||||||
|
} else if (auto goal = dynamic_cast<SubstitutionGoal *>(i.get())) {
|
||||||
|
topPaths.push_back({goal->storePath});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Call queryMissing() efficiently query substitutes. */
|
||||||
|
StorePathSet willBuild, willSubstitute, unknown;
|
||||||
|
uint64_t downloadSize, narSize;
|
||||||
|
store.queryMissing(topPaths, willBuild, willSubstitute, unknown, downloadSize, narSize);
|
||||||
|
|
||||||
|
debug("entered goal loop");
|
||||||
|
|
||||||
|
while (1) {
|
||||||
|
|
||||||
|
checkInterrupt();
|
||||||
|
|
||||||
|
store.autoGC(false);
|
||||||
|
|
||||||
|
/* Call every wake goal (in the ordering established by
|
||||||
|
CompareGoalPtrs). */
|
||||||
|
while (!awake.empty() && !topGoals.empty()) {
|
||||||
|
Goals awake2;
|
||||||
|
for (auto & i : awake) {
|
||||||
|
GoalPtr goal = i.lock();
|
||||||
|
if (goal) awake2.insert(goal);
|
||||||
|
}
|
||||||
|
awake.clear();
|
||||||
|
for (auto & goal : awake2) {
|
||||||
|
checkInterrupt();
|
||||||
|
goal->work();
|
||||||
|
if (topGoals.empty()) break; // stuff may have been cancelled
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (topGoals.empty()) break;
|
||||||
|
|
||||||
|
/* Wait for input. */
|
||||||
|
if (!children.empty() || !waitingForAWhile.empty())
|
||||||
|
waitForInput();
|
||||||
|
else {
|
||||||
|
if (awake.empty() && 0 == settings.maxBuildJobs)
|
||||||
|
{
|
||||||
|
if (getMachines().empty())
|
||||||
|
throw Error("unable to start any build; either increase '--max-jobs' "
|
||||||
|
"or enable remote builds."
|
||||||
|
"\nhttps://nixos.org/nix/manual/#chap-distributed-builds");
|
||||||
|
else
|
||||||
|
throw Error("unable to start any build; remote machines may not have "
|
||||||
|
"all required system features."
|
||||||
|
"\nhttps://nixos.org/nix/manual/#chap-distributed-builds");
|
||||||
|
|
||||||
|
}
|
||||||
|
assert(!awake.empty());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* If --keep-going is not set, it's possible that the main goal
|
||||||
|
exited while some of its subgoals were still active. But if
|
||||||
|
--keep-going *is* set, then they must all be finished now. */
|
||||||
|
assert(!settings.keepGoing || awake.empty());
|
||||||
|
assert(!settings.keepGoing || wantingToBuild.empty());
|
||||||
|
assert(!settings.keepGoing || children.empty());
|
||||||
|
}
|
||||||
|
|
||||||
|
void Worker::waitForInput()
|
||||||
|
{
|
||||||
|
printMsg(lvlVomit, "waiting for children");
|
||||||
|
|
||||||
|
/* Process output from the file descriptors attached to the
|
||||||
|
children, namely log output and output path creation commands.
|
||||||
|
We also use this to detect child termination: if we get EOF on
|
||||||
|
the logger pipe of a build, we assume that the builder has
|
||||||
|
terminated. */
|
||||||
|
|
||||||
|
bool useTimeout = false;
|
||||||
|
long timeout = 0;
|
||||||
|
auto before = steady_time_point::clock::now();
|
||||||
|
|
||||||
|
/* If we're monitoring for silence on stdout/stderr, or if there
|
||||||
|
is a build timeout, then wait for input until the first
|
||||||
|
deadline for any child. */
|
||||||
|
auto nearest = steady_time_point::max(); // nearest deadline
|
||||||
|
if (settings.minFree.get() != 0)
|
||||||
|
// Periodicallty wake up to see if we need to run the garbage collector.
|
||||||
|
nearest = before + std::chrono::seconds(10);
|
||||||
|
for (auto & i : children) {
|
||||||
|
if (!i.respectTimeouts) continue;
|
||||||
|
if (0 != settings.maxSilentTime)
|
||||||
|
nearest = std::min(nearest, i.lastOutput + std::chrono::seconds(settings.maxSilentTime));
|
||||||
|
if (0 != settings.buildTimeout)
|
||||||
|
nearest = std::min(nearest, i.timeStarted + std::chrono::seconds(settings.buildTimeout));
|
||||||
|
}
|
||||||
|
if (nearest != steady_time_point::max()) {
|
||||||
|
timeout = std::max(1L, (long) std::chrono::duration_cast<std::chrono::seconds>(nearest - before).count());
|
||||||
|
useTimeout = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* If we are polling goals that are waiting for a lock, then wake
|
||||||
|
up after a few seconds at most. */
|
||||||
|
if (!waitingForAWhile.empty()) {
|
||||||
|
useTimeout = true;
|
||||||
|
if (lastWokenUp == steady_time_point::min() || lastWokenUp > before) lastWokenUp = before;
|
||||||
|
timeout = std::max(1L,
|
||||||
|
(long) std::chrono::duration_cast<std::chrono::seconds>(
|
||||||
|
lastWokenUp + std::chrono::seconds(settings.pollInterval) - before).count());
|
||||||
|
} else lastWokenUp = steady_time_point::min();
|
||||||
|
|
||||||
|
if (useTimeout)
|
||||||
|
vomit("sleeping %d seconds", timeout);
|
||||||
|
|
||||||
|
/* Use select() to wait for the input side of any logger pipe to
|
||||||
|
become `available'. Note that `available' (i.e., non-blocking)
|
||||||
|
includes EOF. */
|
||||||
|
std::vector<struct pollfd> pollStatus;
|
||||||
|
std::map <int, int> fdToPollStatus;
|
||||||
|
for (auto & i : children) {
|
||||||
|
for (auto & j : i.fds) {
|
||||||
|
pollStatus.push_back((struct pollfd) { .fd = j, .events = POLLIN });
|
||||||
|
fdToPollStatus[j] = pollStatus.size() - 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (poll(pollStatus.data(), pollStatus.size(),
|
||||||
|
useTimeout ? timeout * 1000 : -1) == -1) {
|
||||||
|
if (errno == EINTR) return;
|
||||||
|
throw SysError("waiting for input");
|
||||||
|
}
|
||||||
|
|
||||||
|
auto after = steady_time_point::clock::now();
|
||||||
|
|
||||||
|
/* Process all available file descriptors. FIXME: this is
|
||||||
|
O(children * fds). */
|
||||||
|
decltype(children)::iterator i;
|
||||||
|
for (auto j = children.begin(); j != children.end(); j = i) {
|
||||||
|
i = std::next(j);
|
||||||
|
|
||||||
|
checkInterrupt();
|
||||||
|
|
||||||
|
GoalPtr goal = j->goal.lock();
|
||||||
|
assert(goal);
|
||||||
|
|
||||||
|
set<int> fds2(j->fds);
|
||||||
|
std::vector<unsigned char> buffer(4096);
|
||||||
|
for (auto & k : fds2) {
|
||||||
|
if (pollStatus.at(fdToPollStatus.at(k)).revents) {
|
||||||
|
ssize_t rd = ::read(k, buffer.data(), buffer.size());
|
||||||
|
// FIXME: is there a cleaner way to handle pt close
|
||||||
|
// than EIO? Is this even standard?
|
||||||
|
if (rd == 0 || (rd == -1 && errno == EIO)) {
|
||||||
|
debug("%1%: got EOF", goal->getName());
|
||||||
|
goal->handleEOF(k);
|
||||||
|
j->fds.erase(k);
|
||||||
|
} else if (rd == -1) {
|
||||||
|
if (errno != EINTR)
|
||||||
|
throw SysError("%s: read failed", goal->getName());
|
||||||
|
} else {
|
||||||
|
printMsg(lvlVomit, "%1%: read %2% bytes",
|
||||||
|
goal->getName(), rd);
|
||||||
|
string data((char *) buffer.data(), rd);
|
||||||
|
j->lastOutput = after;
|
||||||
|
goal->handleChildOutput(k, data);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (goal->exitCode == Goal::ecBusy &&
|
||||||
|
0 != settings.maxSilentTime &&
|
||||||
|
j->respectTimeouts &&
|
||||||
|
after - j->lastOutput >= std::chrono::seconds(settings.maxSilentTime))
|
||||||
|
{
|
||||||
|
goal->timedOut(Error(
|
||||||
|
"%1% timed out after %2% seconds of silence",
|
||||||
|
goal->getName(), settings.maxSilentTime));
|
||||||
|
}
|
||||||
|
|
||||||
|
else if (goal->exitCode == Goal::ecBusy &&
|
||||||
|
0 != settings.buildTimeout &&
|
||||||
|
j->respectTimeouts &&
|
||||||
|
after - j->timeStarted >= std::chrono::seconds(settings.buildTimeout))
|
||||||
|
{
|
||||||
|
goal->timedOut(Error(
|
||||||
|
"%1% timed out after %2% seconds",
|
||||||
|
goal->getName(), settings.buildTimeout));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!waitingForAWhile.empty() && lastWokenUp + std::chrono::seconds(settings.pollInterval) <= after) {
|
||||||
|
lastWokenUp = after;
|
||||||
|
for (auto & i : waitingForAWhile) {
|
||||||
|
GoalPtr goal = i.lock();
|
||||||
|
if (goal) wakeUp(goal);
|
||||||
|
}
|
||||||
|
waitingForAWhile.clear();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
unsigned int Worker::exitStatus()
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* 1100100
|
||||||
|
* ^^^^
|
||||||
|
* |||`- timeout
|
||||||
|
* ||`-- output hash mismatch
|
||||||
|
* |`--- build failure
|
||||||
|
* `---- not deterministic
|
||||||
|
*/
|
||||||
|
unsigned int mask = 0;
|
||||||
|
bool buildFailure = permanentFailure || timedOut || hashMismatch;
|
||||||
|
if (buildFailure)
|
||||||
|
mask |= 0x04; // 100
|
||||||
|
if (timedOut)
|
||||||
|
mask |= 0x01; // 101
|
||||||
|
if (hashMismatch)
|
||||||
|
mask |= 0x02; // 102
|
||||||
|
if (checkMismatch) {
|
||||||
|
mask |= 0x08; // 104
|
||||||
|
}
|
||||||
|
|
||||||
|
if (mask)
|
||||||
|
mask |= 0x60;
|
||||||
|
return mask ? mask : 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
bool Worker::pathContentsGood(const StorePath & path)
|
||||||
|
{
|
||||||
|
auto i = pathContentsGoodCache.find(path);
|
||||||
|
if (i != pathContentsGoodCache.end()) return i->second;
|
||||||
|
printInfo("checking path '%s'...", store.printStorePath(path));
|
||||||
|
auto info = store.queryPathInfo(path);
|
||||||
|
bool res;
|
||||||
|
if (!pathExists(store.printStorePath(path)))
|
||||||
|
res = false;
|
||||||
|
else {
|
||||||
|
HashResult current = hashPath(info->narHash.type, store.printStorePath(path));
|
||||||
|
Hash nullHash(htSHA256);
|
||||||
|
res = info->narHash == nullHash || info->narHash == current.first;
|
||||||
|
}
|
||||||
|
pathContentsGoodCache.insert_or_assign(path, res);
|
||||||
|
if (!res)
|
||||||
|
logError({
|
||||||
|
.name = "Corrupted path",
|
||||||
|
.hint = hintfmt("path '%s' is corrupted or missing!", store.printStorePath(path))
|
||||||
|
});
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void Worker::markContentsGood(const StorePath & path)
|
||||||
|
{
|
||||||
|
pathContentsGoodCache.insert_or_assign(path, true);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
GoalPtr upcast_goal(std::shared_ptr<SubstitutionGoal> subGoal) {
|
||||||
|
return subGoal;
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
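Given the bit layout documented in `exitStatus()`, a failing status in the 100s can be decoded back into its causes. A sketch (only the bit values come from the code above; the helper itself is hypothetical):

// Decode the composite exit status produced by Worker::exitStatus().
#include <cstdio>

void describeStatus(unsigned int status)
{
    if (status == 1) { std::printf("generic failure\n"); return; }
    // 0x60 is OR'ed in whenever any specific bit is set, so specific
    // failures land in the 100s.
    if (status & 0x04) std::printf("build failure\n");
    if (status & 0x01) std::printf("timeout\n");
    if (status & 0x02) std::printf("output hash mismatch\n");
    if (status & 0x08) std::printf("non-deterministic in check mode\n");
}

int main() { describeStatus(0x60 | 0x04 | 0x01); }  // 101: failure + timeout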
207 src/libstore/build/worker.hh Normal file

@@ -0,0 +1,207 @@
#pragma once

#include "types.hh"
#include "lock.hh"
#include "local-store.hh"
#include "goal.hh"

namespace nix {

/* Forward definition. */
struct DerivationGoal;
struct SubstitutionGoal;

/* Workaround for not being able to declare something like

     class SubstitutionGoal : public Goal;

   even when Goal is a complete type.

   This is still a static cast. The purpose of exporting it is to define it in
   a place where `SubstitutionGoal` is concrete, and use it in a place where it
   is opaque. */
GoalPtr upcast_goal(std::shared_ptr<SubstitutionGoal> subGoal);

typedef std::chrono::time_point<std::chrono::steady_clock> steady_time_point;


/* A mapping used to remember for each child process to what goal it
   belongs, and file descriptors for receiving log data and output
   path creation commands. */
struct Child
{
    WeakGoalPtr goal;
    Goal * goal2; // ugly hackery
    set<int> fds;
    bool respectTimeouts;
    bool inBuildSlot;
    steady_time_point lastOutput; /* time we last got output on stdout/stderr */
    steady_time_point timeStarted;
};

/* Forward definition. */
struct HookInstance;

/* The worker class. */
class Worker
{
private:

    /* Note: the worker should only have strong pointers to the
       top-level goals. */

    /* The top-level goals of the worker. */
    Goals topGoals;

    /* Goals that are ready to do some work. */
    WeakGoals awake;

    /* Goals waiting for a build slot. */
    WeakGoals wantingToBuild;

    /* Child processes currently running. */
    std::list<Child> children;

    /* Number of build slots occupied.  This includes local builds and
       substitutions but not remote builds via the build hook. */
    unsigned int nrLocalBuilds;

    /* Maps used to prevent multiple instantiations of a goal for the
       same derivation / path. */
    std::map<StorePath, std::weak_ptr<DerivationGoal>> derivationGoals;
    std::map<StorePath, std::weak_ptr<SubstitutionGoal>> substitutionGoals;

    /* Goals waiting for busy paths to be unlocked. */
    WeakGoals waitingForAnyGoal;

    /* Goals sleeping for a few seconds (polling a lock). */
    WeakGoals waitingForAWhile;

    /* Last time the goals in `waitingForAWhile' were woken up. */
    steady_time_point lastWokenUp;

    /* Cache for pathContentsGood(). */
    std::map<StorePath, bool> pathContentsGoodCache;

public:

    const Activity act;
    const Activity actDerivations;
    const Activity actSubstitutions;

    /* Set if at least one derivation had a BuildError (i.e. permanent
       failure). */
    bool permanentFailure;

    /* Set if at least one derivation had a timeout. */
    bool timedOut;

    /* Set if at least one derivation fails with a hash mismatch. */
    bool hashMismatch;

    /* Set if at least one derivation is not deterministic in check mode. */
    bool checkMismatch;

    LocalStore & store;

    std::unique_ptr<HookInstance> hook;

    uint64_t expectedBuilds = 0;
    uint64_t doneBuilds = 0;
    uint64_t failedBuilds = 0;
    uint64_t runningBuilds = 0;

    uint64_t expectedSubstitutions = 0;
    uint64_t doneSubstitutions = 0;
    uint64_t failedSubstitutions = 0;
    uint64_t runningSubstitutions = 0;
    uint64_t expectedDownloadSize = 0;
    uint64_t doneDownloadSize = 0;
    uint64_t expectedNarSize = 0;
    uint64_t doneNarSize = 0;

    /* Whether to ask the build hook if it can build a derivation. If
       it answers with "decline-permanently", we don't try again. */
    bool tryBuildHook = true;

    Worker(LocalStore & store);
    ~Worker();

    /* Make a goal (with caching). */

    /* derivation goal */
private:
    std::shared_ptr<DerivationGoal> makeDerivationGoalCommon(
        const StorePath & drvPath, const StringSet & wantedOutputs,
        std::function<std::shared_ptr<DerivationGoal>()> mkDrvGoal);
public:
    std::shared_ptr<DerivationGoal> makeDerivationGoal(
        const StorePath & drvPath,
        const StringSet & wantedOutputs, BuildMode buildMode = bmNormal);
    std::shared_ptr<DerivationGoal> makeBasicDerivationGoal(
        const StorePath & drvPath, const BasicDerivation & drv,
        const StringSet & wantedOutputs, BuildMode buildMode = bmNormal);

    /* substitution goal */
    std::shared_ptr<SubstitutionGoal> makeSubstitutionGoal(const StorePath & storePath, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);

    /* Remove a dead goal. */
    void removeGoal(GoalPtr goal);

    /* Wake up a goal (i.e., there is something for it to do). */
    void wakeUp(GoalPtr goal);

    /* Return the number of local build and substitution processes
       currently running (but not remote builds via the build
       hook). */
    unsigned int getNrLocalBuilds();

    /* Registers a running child process.  `inBuildSlot' means that
       the process counts towards the jobs limit. */
    void childStarted(GoalPtr goal, const set<int> & fds,
        bool inBuildSlot, bool respectTimeouts);

    /* Unregisters a running child process.  `wakeSleepers' should be
       false if there is no sense in waking up goals that are sleeping
       because they can't run yet (e.g., there is no free build slot,
       or the hook would still say `postpone'). */
    void childTerminated(Goal * goal, bool wakeSleepers = true);

    /* Put `goal' to sleep until a build slot becomes available (which
       might be right away). */
    void waitForBuildSlot(GoalPtr goal);

    /* Wait for any goal to finish.  Pretty indiscriminate way to
       wait for some resource that some other goal is holding. */
    void waitForAnyGoal(GoalPtr goal);

    /* Wait for a few seconds and then retry this goal.  Used when
       waiting for a lock held by another process.  This kind of
       polling is inefficient, but POSIX doesn't really provide a way
       to wait for multiple locks in the main select() loop. */
    void waitForAWhile(GoalPtr goal);

    /* Loop until the specified top-level goals have finished. */
    void run(const Goals & topGoals);

    /* Wait for input to become available. */
    void waitForInput();

    unsigned int exitStatus();

    /* Check whether the given valid path exists and has the right
       contents. */
    bool pathContentsGood(const StorePath & path);

    void markContentsGood(const StorePath & path);

    void updateProgress()
    {
        actDerivations.progress(doneBuilds, expectedBuilds + doneBuilds, runningBuilds, failedBuilds);
        actSubstitutions.progress(doneSubstitutions, expectedSubstitutions + doneSubstitutions, runningSubstitutions, failedSubstitutions);
        act.setExpected(actFileTransfer, expectedDownloadSize + doneDownloadSize);
        act.setExpected(actCopyPath, expectedNarSize + doneNarSize);
    }
};

}
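The goal maps in the class above hold `std::weak_ptr`s so that a goal is destroyed as soon as nobody needs it, while concurrent requests for the same path are deduplicated. The pattern in isolation (illustrative names, not from this commit):

// Deduplicate live objects by key with weak_ptr, as the goal maps do.
#include <cassert>
#include <map>
#include <memory>
#include <string>

std::map<std::string, std::weak_ptr<int>> cache;

std::shared_ptr<int> getGoal(const std::string & key)
{
    auto & weak = cache[key];
    auto strong = weak.lock();       // still alive? reuse it
    if (!strong) {
        strong = std::make_shared<int>(42);
        weak = strong;               // store a non-owning reference
    }
    return strong;
}

int main() {
    auto a = getGoal("x"), b = getGoal("x");
    assert(a == b);                  // same instance while referenced
    a.reset(); b.reset();            // last strong reference dropped...
    assert(cache["x"].expired());    // ...so the cached entry is dead
}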
11 src/libstore/ca-specific-schema.sql Normal file

@@ -0,0 +1,11 @@
-- Extension of the sql schema for content-addressed derivations.
-- Won't be loaded unless the experimental feature `ca-derivations`
-- is enabled

create table if not exists Realisations (
    drvPath text not null,
    outputName text not null, -- symbolic output id, usually "out"
    outputPath integer not null,
    primary key (drvPath, outputName),
    foreign key (outputPath) references ValidPaths(id) on delete cascade
);
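A hedged C++ sketch of how a row could be inserted into this table using the raw SQLite C API; Nix itself goes through its own SQLite wrappers, which this diff does not show, so the helper below is illustrative only:

// Insert one Realisations row; error handling trimmed for brevity.
#include <sqlite3.h>

void registerRealisation(sqlite3 * db, const char * drvPath,
    const char * outputName, sqlite3_int64 outputPathId)
{
    sqlite3_stmt * stmt = nullptr;
    sqlite3_prepare_v2(db,
        "insert or replace into Realisations (drvPath, outputName, outputPath) "
        "values (?, ?, ?)", -1, &stmt, nullptr);
    sqlite3_bind_text(stmt, 1, drvPath, -1, SQLITE_TRANSIENT);
    sqlite3_bind_text(stmt, 2, outputName, -1, SQLITE_TRANSIENT);
    sqlite3_bind_int64(stmt, 3, outputPathId);  // references ValidPaths(id)
    sqlite3_step(stmt);
    sqlite3_finalize(stmt);
}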
@@ -153,10 +153,10 @@ struct TunnelSink : Sink
 {
     Sink & to;
     TunnelSink(Sink & to) : to(to) { }
-    virtual void operator () (const unsigned char * data, size_t len)
+    void operator () (std::string_view data)
     {
         to << STDERR_WRITE;
-        writeString(data, len, to);
+        writeString(data, to);
     }
 };

@@ -165,7 +165,7 @@ struct TunnelSource : BufferedSource
     Source & from;
     BufferedSink & to;
     TunnelSource(Source & from, BufferedSink & to) : from(from), to(to) { }
-    size_t readUnbuffered(unsigned char * data, size_t len) override
+    size_t readUnbuffered(char * data, size_t len) override
     {
         to << STDERR_READ << len;
         to.flush();

@@ -215,6 +215,8 @@ struct ClientSettings
             for (auto & s : ss)
                 if (trusted.count(s))
                     subs.push_back(s);
+                else if (!hasSuffix(s, "/") && trusted.count(s + "/"))
+                    subs.push_back(s + "/");
                 else
                     warn("ignoring untrusted substituter '%s'", s);
             res = subs;

@@ -231,8 +233,6 @@ struct ClientSettings
                     settings.set(name, value);
                 else if (setSubstituters(settings.substituters))
                     ;
-                else if (setSubstituters(settings.extraSubstituters))
-                    ;
                 else
                     debug("ignoring the client-specified setting '%s', because it is a restricted setting and you are not a trusted user", name);
             } catch (UsageError & e) {

@@ -276,8 +276,17 @@ static void performOp(TunnelLogger * logger, ref<Store> store,

     case wopQueryValidPaths: {
         auto paths = worker_proto::read(*store, from, Phantom<StorePathSet> {});
+
+        SubstituteFlag substitute = NoSubstitute;
+        if (GET_PROTOCOL_MINOR(clientVersion) >= 27) {
+            substitute = readInt(from) ? Substitute : NoSubstitute;
+        }
+
         logger->startWork();
-        auto res = store->queryValidPaths(paths);
+        if (substitute) {
+            store->substitutePaths(paths);
+        }
+        auto res = store->queryValidPaths(paths, substitute);
         logger->stopWork();
         worker_proto::write(*store, to, res);
         break;

@@ -859,6 +868,28 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         break;
     }

+    case wopRegisterDrvOutput: {
+        logger->startWork();
+        auto outputId = DrvOutput::parse(readString(from));
+        auto outputPath = StorePath(readString(from));
+        auto resolvedDrv = StorePath(readString(from));
+        store->registerDrvOutput(Realisation{
+            .id = outputId, .outPath = outputPath});
+        logger->stopWork();
+        break;
+    }
+
+    case wopQueryRealisation: {
+        logger->startWork();
+        auto outputId = DrvOutput::parse(readString(from));
+        auto info = store->queryRealisation(outputId);
+        logger->stopWork();
+        std::set<StorePath> outPaths;
+        if (info) outPaths.insert(info->outPath);
+        worker_proto::write(*store, to, outPaths);
+        break;
+    }
+
     default:
         throw Error("invalid operation %1%", op);
     }
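For the two new daemon opcodes, a client has to mirror the wire format exactly: the opcode, then the realisation id, output path, and resolved derivation path as strings. A hedged sketch of what a matching client write could look like (the `Sink` stand-in, the opcode parameter, and the example id format are assumptions, not shown in this diff):

// Hypothetical client-side mirror of the wopRegisterDrvOutput handler above.
#include <string>

template<typename Sink>
void sendRegisterDrvOutput(Sink & to, unsigned int wopRegisterDrvOutput,
    const std::string & drvOutputId,     // parsed by DrvOutput::parse()
    const std::string & outputPath,      // parsed as a StorePath
    const std::string & resolvedDrvPath) // parsed as a StorePath
{
    to << wopRegisterDrvOutput  // opcode first, then the three strings
       << drvOutputId
       << outputPath
       << resolvedDrvPath;
}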
@@ -21,6 +21,9 @@ std::optional<StorePath> DerivationOutput::path(const Store & store, std::string
         [](DerivationOutputCAFloating dof) -> std::optional<StorePath> {
             return std::nullopt;
         },
+        [](DerivationOutputDeferred) -> std::optional<StorePath> {
+            return std::nullopt;
+        },
     }, output);
 }

@@ -37,6 +40,7 @@ bool derivationIsCA(DerivationType dt) {
     case DerivationType::InputAddressed: return false;
     case DerivationType::CAFixed: return true;
     case DerivationType::CAFloating: return true;
+    case DerivationType::DeferredInputAddressed: return false;
     };
     // Since enums can have non-variant values, but making a `default:` would
     // disable exhaustiveness warnings.

@@ -48,6 +52,7 @@ bool derivationIsFixed(DerivationType dt) {
     case DerivationType::InputAddressed: return false;
     case DerivationType::CAFixed: return true;
     case DerivationType::CAFloating: return false;
+    case DerivationType::DeferredInputAddressed: return false;
     };
     assert(false);
 }

@@ -57,6 +62,7 @@ bool derivationIsImpure(DerivationType dt) {
     case DerivationType::InputAddressed: return false;
     case DerivationType::CAFixed: return true;
     case DerivationType::CAFloating: return false;
+    case DerivationType::DeferredInputAddressed: return false;
     };
     assert(false);
 }

@@ -180,6 +186,11 @@ static DerivationOutput parseDerivationOutput(const Store & store,
         };
     }
     } else {
+        if (pathS == "") {
+            return DerivationOutput {
+                .output = DerivationOutputDeferred { }
+            };
+        }
         validatePath(pathS);
         return DerivationOutput {
             .output = DerivationOutputInputAddressed {

@@ -325,6 +336,11 @@ string Derivation::unparse(const Store & store, bool maskOutputs,
                 s += ','; printUnquotedString(s, makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType));
                 s += ','; printUnquotedString(s, "");
             },
+            [&](DerivationOutputDeferred) {
+                s += ','; printUnquotedString(s, "");
+                s += ','; printUnquotedString(s, "");
+                s += ','; printUnquotedString(s, "");
+            }
         }, i.second.output);
         s += ')';
     }

@@ -389,7 +405,7 @@ std::string outputPathName(std::string_view drvName, std::string_view outputName

 DerivationType BasicDerivation::type() const
 {
-    std::set<std::string_view> inputAddressedOutputs, fixedCAOutputs, floatingCAOutputs;
+    std::set<std::string_view> inputAddressedOutputs, fixedCAOutputs, floatingCAOutputs, deferredIAOutputs;
     std::optional<HashType> floatingHashType;
     for (auto & i : outputs) {
         std::visit(overloaded {

@@ -408,29 +424,34 @@ DerivationType BasicDerivation::type() const
                 throw Error("All floating outputs must use the same hash type");
             }
         },
+        [&](DerivationOutputDeferred _) {
+            deferredIAOutputs.insert(i.first);
+        },
         }, i.second.output);
     }

-    if (inputAddressedOutputs.empty() && fixedCAOutputs.empty() && floatingCAOutputs.empty()) {
+    if (inputAddressedOutputs.empty() && fixedCAOutputs.empty() && floatingCAOutputs.empty() && deferredIAOutputs.empty()) {
         throw Error("Must have at least one output");
-    } else if (! inputAddressedOutputs.empty() && fixedCAOutputs.empty() && floatingCAOutputs.empty()) {
+    } else if (! inputAddressedOutputs.empty() && fixedCAOutputs.empty() && floatingCAOutputs.empty() && deferredIAOutputs.empty()) {
         return DerivationType::InputAddressed;
-    } else if (inputAddressedOutputs.empty() && ! fixedCAOutputs.empty() && floatingCAOutputs.empty()) {
+    } else if (inputAddressedOutputs.empty() && ! fixedCAOutputs.empty() && floatingCAOutputs.empty() && deferredIAOutputs.empty()) {
         if (fixedCAOutputs.size() > 1)
             // FIXME: Experimental feature?
             throw Error("Only one fixed output is allowed for now");
         if (*fixedCAOutputs.begin() != "out")
             throw Error("Single fixed output must be named \"out\"");
         return DerivationType::CAFixed;
-    } else if (inputAddressedOutputs.empty() && fixedCAOutputs.empty() && ! floatingCAOutputs.empty()) {
+    } else if (inputAddressedOutputs.empty() && fixedCAOutputs.empty() && ! floatingCAOutputs.empty() && deferredIAOutputs.empty()) {
         return DerivationType::CAFloating;
+    } else if (inputAddressedOutputs.empty() && fixedCAOutputs.empty() && floatingCAOutputs.empty() && !deferredIAOutputs.empty()) {
+        return DerivationType::DeferredInputAddressed;
     } else {
         throw Error("Can't mix derivation output types");
     }
 }


-DrvHashes drvHashes;
+Sync<DrvHashes> drvHashes;

 /* pathDerivationModulo and hashDerivationModulo are mutually recursive
  */

@@ -438,20 +459,22 @@ DrvHashes drvHashes;
 /* Look up the derivation by value and memoize the
    `hashDerivationModulo` call.
  */
-static const DrvHashModulo & pathDerivationModulo(Store & store, const StorePath & drvPath)
+static const DrvHashModulo pathDerivationModulo(Store & store, const StorePath & drvPath)
 {
-    auto h = drvHashes.find(drvPath);
-    if (h == drvHashes.end()) {
-        assert(store.isValidPath(drvPath));
-        // Cache it
-        h = drvHashes.insert_or_assign(
-            drvPath,
-            hashDerivationModulo(
-                store,
-                store.readDerivation(drvPath),
-                false)).first;
-    }
-    return h->second;
+    {
+        auto hashes = drvHashes.lock();
+        auto h = hashes->find(drvPath);
+        if (h != hashes->end()) {
+            return h->second;
+        }
+    }
+    auto h = hashDerivationModulo(
+        store,
+        store.readInvalidDerivation(drvPath),
+        false);
+    // Cache it
+    drvHashes.lock()->insert_or_assign(drvPath, h);
+    return h;
 }

 /* See the header for interface details. These are the implementation details.

@@ -473,10 +496,9 @@ static const DrvHashModulo & pathDerivationModulo(Store & store, const StorePath
  */
 DrvHashModulo hashDerivationModulo(Store & store, const Derivation & drv, bool maskOutputs)
 {
+    bool isDeferred = false;
     /* Return a fixed hash for fixed-output derivations. */
     switch (drv.type()) {
-    case DerivationType::CAFloating:
-        throw Error("Regular input-addressed derivations are not yet allowed to depend on CA derivations");
     case DerivationType::CAFixed: {
         std::map<std::string, Hash> outputHashes;
         for (const auto & i : drv.outputs) {

@@ -489,8 +511,13 @@ DrvHashModulo hashDerivationModulo(Store & store, const Derivation & drv, bool m
         }
         return outputHashes;
     }
+    case DerivationType::CAFloating:
+        isDeferred = true;
+        break;
     case DerivationType::InputAddressed:
         break;
+    case DerivationType::DeferredInputAddressed:
+        break;
     }

     /* For other derivations, replace the inputs paths with recursive

@@ -503,6 +530,10 @@ DrvHashModulo hashDerivationModulo(Store & store, const Derivation & drv, bool m
             [&](Hash drvHash) {
                 inputs2.insert_or_assign(drvHash.to_string(Base16, false), i.second);
             },
+            [&](DeferredHash deferredHash) {
+                isDeferred = true;
+                inputs2.insert_or_assign(deferredHash.hash.to_string(Base16, false), i.second);
+            },
             // CA derivation's output hashes
             [&](CaOutputHashes outputHashes) {
                 std::set<std::string> justOut = { "out" };

@@ -517,7 +548,34 @@ DrvHashModulo hashDerivationModulo(Store & store, const Derivation & drv, bool m
         }, res);
     }

-    return hashString(htSHA256, drv.unparse(store, maskOutputs, &inputs2));
+    auto hash = hashString(htSHA256, drv.unparse(store, maskOutputs, &inputs2));
+
+    if (isDeferred)
+        return DeferredHash { hash };
+    else
+        return hash;
+}
+
+
+std::map<std::string, Hash> staticOutputHashes(Store& store, const Derivation& drv)
+{
+    std::map<std::string, Hash> res;
+    std::visit(overloaded {
+        [&](Hash drvHash) {
+            for (auto & outputName : drv.outputNames()) {
+                res.insert({outputName, drvHash});
+            }
+        },
+        [&](DeferredHash deferredHash) {
+            for (auto & outputName : drv.outputNames()) {
+                res.insert({outputName, deferredHash.hash});
+            }
+        },
+        [&](CaOutputHashes outputHashes) {
+            res = outputHashes;
+        },
+    }, hashDerivationModulo(store, drv, true));
+    return res;
 }
@ -620,6 +678,11 @@ void writeDerivation(Sink & out, const Store & store, const BasicDerivation & dr
|
||||||
<< (makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType))
|
<< (makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType))
|
||||||
<< "";
|
<< "";
|
||||||
},
|
},
|
||||||
|
[&](DerivationOutputDeferred) {
|
||||||
|
out << ""
|
||||||
|
<< ""
|
||||||
|
<< "";
|
||||||
|
},
|
||||||
}, i.second.output);
|
}, i.second.output);
|
||||||
}
|
}
|
||||||
worker_proto::write(store, out, drv.inputSrcs);
|
worker_proto::write(store, out, drv.inputSrcs);
|
||||||
|
@ -645,7 +708,6 @@ std::string downstreamPlaceholder(const Store & store, const StorePath & drvPath
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// N.B. Outputs are left unchanged
|
|
||||||
static void rewriteDerivation(Store & store, BasicDerivation & drv, const StringMap & rewrites) {
|
static void rewriteDerivation(Store & store, BasicDerivation & drv, const StringMap & rewrites) {
|
||||||
|
|
||||||
debug("Rewriting the derivation");
|
debug("Rewriting the derivation");
|
||||||
|
@ -666,12 +728,24 @@ static void rewriteDerivation(Store & store, BasicDerivation & drv, const String
|
||||||
newEnv.emplace(envName, envValue);
|
newEnv.emplace(envName, envValue);
|
||||||
}
|
}
|
||||||
drv.env = newEnv;
|
drv.env = newEnv;
|
||||||
|
|
||||||
|
auto hashModulo = hashDerivationModulo(store, Derivation(drv), true);
|
||||||
|
for (auto & [outputName, output] : drv.outputs) {
|
||||||
|
if (std::holds_alternative<DerivationOutputDeferred>(output.output)) {
|
||||||
|
Hash h = std::get<Hash>(hashModulo);
|
||||||
|
auto outPath = store.makeOutputPath(outputName, h, drv.name);
|
||||||
|
drv.env[outputName] = store.printStorePath(outPath);
|
||||||
|
output = DerivationOutput {
|
||||||
|
.output = DerivationOutputInputAddressed {
|
||||||
|
.path = std::move(outPath),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
std::optional<BasicDerivation> Derivation::tryResolveUncached(Store & store) {
|
||||||
Sync<DrvPathResolutions> drvPathResolutions;
|
|
||||||
|
|
||||||
std::optional<BasicDerivation> Derivation::tryResolve(Store & store) {
|
|
||||||
BasicDerivation resolved { *this };
|
BasicDerivation resolved { *this };
|
||||||
|
|
||||||
// Input paths that we'll want to rewrite in the derivation
|
// Input paths that we'll want to rewrite in the derivation
|
||||||
|
@ -697,4 +771,34 @@ std::optional<BasicDerivation> Derivation::tryResolve(Store & store) {
|
||||||
return resolved;
|
return resolved;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
std::optional<BasicDerivation> Derivation::tryResolve(Store& store)
|
||||||
|
{
|
||||||
|
auto drvPath = writeDerivation(store, *this, NoRepair, false);
|
||||||
|
return Derivation::tryResolve(store, drvPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
std::optional<BasicDerivation> Derivation::tryResolve(Store& store, const StorePath& drvPath)
|
||||||
|
{
|
||||||
|
// This is quite dirty and leaky, but will disappear once #4340 is merged
|
||||||
|
static Sync<std::map<StorePath, std::optional<Derivation>>> resolutionsCache;
|
||||||
|
|
||||||
|
{
|
||||||
|
auto resolutions = resolutionsCache.lock();
|
||||||
|
auto resolvedDrvIter = resolutions->find(drvPath);
|
||||||
|
if (resolvedDrvIter != resolutions->end()) {
|
||||||
|
auto & [_, resolvedDrv] = *resolvedDrvIter;
|
||||||
|
return *resolvedDrv;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Try resolve drv and use that path instead. */
|
||||||
|
auto drv = store.readDerivation(drvPath);
|
||||||
|
auto attempt = drv.tryResolveUncached(store);
|
||||||
|
if (!attempt)
|
||||||
|
return std::nullopt;
|
||||||
|
/* Store in memo table. */
|
||||||
|
resolutionsCache.lock()->insert_or_assign(drvPath, *attempt);
|
||||||
|
return *attempt;
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
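The resolutionsCache in tryResolve is the lock-compute-relock memoisation shape: the Sync guard is dropped before the potentially recursive resolution work and re-acquired only to store the result, so the cache lock is never held across re-entrant calls. The same shape as a standalone sketch, with std::mutex in place of Nix's Sync wrapper (all names here are illustrative, not Nix API):

    #include <map>
    #include <mutex>
    #include <optional>
    #include <string>

    using Key = std::string;    // stand-in for StorePath
    using Value = std::string;  // stand-in for the resolved derivation

    std::optional<Value> resolveUncached(const Key & k)
    {
        // Placeholder for the expensive resolution step.
        return k.empty() ? std::nullopt : std::optional<Value>(k + "-resolved");
    }

    std::optional<Value> resolveCached(const Key & k)
    {
        static std::mutex m;
        static std::map<Key, std::optional<Value>> cache;

        {   // Hold the lock only for the lookup...
            std::lock_guard<std::mutex> lock(m);
            auto it = cache.find(k);
            if (it != cache.end()) return it->second;
        }

        // ...not while computing, which may re-enter the cache.
        auto res = resolveUncached(k);

        // Note: this sketch caches failures too; the code above only
        // memoises successful resolutions.
        std::lock_guard<std::mutex> lock(m);
        cache.insert_or_assign(k, res);
        return res;
    }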
src/libstore/derivations.hh

@@ -18,8 +18,6 @@ namespace nix {
 /* The traditional non-fixed-output derivation type. */
 struct DerivationOutputInputAddressed
 {
-    /* Will need to become `std::optional<StorePath>` once input-addressed
-       derivations are allowed to depend on cont-addressed derivations */
     StorePath path;
 };

@@ -41,12 +39,18 @@ struct DerivationOutputCAFloating
     HashType hashType;
 };

+/* Input-addressed output which depends on a (CA) derivation whose hash isn't
+ * known atm
+ */
+struct DerivationOutputDeferred {};
+
 struct DerivationOutput
 {
     std::variant<
         DerivationOutputInputAddressed,
         DerivationOutputCAFixed,
-        DerivationOutputCAFloating
+        DerivationOutputCAFloating,
+        DerivationOutputDeferred
     > output;
     std::optional<HashType> hashAlgoOpt(const Store & store) const;
     /* Note, when you use this function you should make sure that you're passing

@@ -72,6 +76,7 @@ typedef std::map<string, string> StringPairs;

 enum struct DerivationType : uint8_t {
     InputAddressed,
+    DeferredInputAddressed,
     CAFixed,
     CAFloating,
 };

@@ -133,10 +138,14 @@ struct Derivation : BasicDerivation

        2. Input placeholders are replaced with realized input store paths. */
     std::optional<BasicDerivation> tryResolve(Store & store);
+    static std::optional<BasicDerivation> tryResolve(Store & store, const StorePath & drvPath);

     Derivation() = default;
     Derivation(const BasicDerivation & bd) : BasicDerivation(bd) { }
     Derivation(BasicDerivation && bd) : BasicDerivation(std::move(bd)) { }
+
+private:
+    std::optional<BasicDerivation> tryResolveUncached(Store & store);
 };


@@ -167,9 +176,12 @@ std::string outputPathName(std::string_view drvName, std::string_view outputName
 // whose output hashes are always known since they are fixed up-front.
 typedef std::map<std::string, Hash> CaOutputHashes;

+struct DeferredHash { Hash hash; };
+
 typedef std::variant<
     Hash, // regular DRV normalized hash
-    CaOutputHashes
+    CaOutputHashes, // Fixed-output derivation hashes
+    DeferredHash // Deferred hashes for floating outputs drvs and their dependencies
 > DrvHashModulo;

 /* Returns hashes with the details of fixed-output subderivations

@@ -197,20 +209,17 @@ typedef std::variant<
 */
 DrvHashModulo hashDerivationModulo(Store & store, const Derivation & drv, bool maskOutputs);

+/*
+   Return a map associating each output to a hash that uniquely identifies its
+   derivation (modulo the self-references).
+ */
+std::map<std::string, Hash> staticOutputHashes(Store& store, const Derivation& drv);
+
 /* Memoisation of hashDerivationModulo(). */
 typedef std::map<StorePath, DrvHashModulo> DrvHashes;

-extern DrvHashes drvHashes; // FIXME: global, not thread-safe
-
-/* Memoisation of `readDerivation(..).resove()`. */
-typedef std::map<
-    StorePath,
-    std::optional<StorePath>
-> DrvPathResolutions;
-
 // FIXME: global, though at least thread-safe.
-// FIXME: arguably overlaps with hashDerivationModulo memo table.
-extern Sync<DrvPathResolutions> drvPathResolutions;
+extern Sync<DrvHashes> drvHashes;

 bool wantOutput(const string & output, const std::set<string> & wanted);
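hashDerivationModulo and staticOutputHashes both dispatch over the DrvHashModulo alternatives via std::visit plus the overloaded helper, which is the stock C++17 idiom rather than anything Nix-specific. A self-contained sketch over a simplified variant of the same shape:

    #include <iostream>
    #include <map>
    #include <string>
    #include <variant>

    // The standard C++17 "overloaded" visitor helper.
    template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
    template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

    // Simplified stand-ins for the real Hash / CaOutputHashes / DeferredHash.
    struct Hash { std::string h; };
    struct DeferredHash { Hash hash; };
    using CaOutputHashes = std::map<std::string, Hash>;

    using DrvHashModulo = std::variant<Hash, CaOutputHashes, DeferredHash>;

    void describe(const DrvHashModulo & v)
    {
        std::visit(overloaded {
            [](const Hash & h) { std::cout << "plain: " << h.h << "\n"; },
            [](const CaOutputHashes & m) { std::cout << m.size() << " fixed output hashes\n"; },
            [](const DeferredHash & d) { std::cout << "deferred: " << d.hash.h << "\n"; },
        }, v);
    }

    int main()
    {
        describe(Hash{"abc"});
        describe(DeferredHash{Hash{"def"}});
    }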
src/libstore/dummy-store.cc

@@ -60,6 +60,9 @@ struct DummyStore : public Store, public virtual DummyStoreConfig
     BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
         BuildMode buildMode) override
     { unsupported("buildDerivation"); }

+    std::optional<const Realisation> queryRealisation(const DrvOutput&) override
+    { unsupported("queryRealisation"); }
 };

 static RegisterStoreImplementation<DummyStore, DummyStoreConfig> regDummyStore;
src/libstore/filetransfer.cc

@@ -95,18 +95,18 @@ struct curlFileTransfer : public FileTransfer
             fmt(request.data ? "uploading '%s'" : "downloading '%s'", request.uri),
             {request.uri}, request.parentAct)
         , callback(std::move(callback))
-        , finalSink([this](const unsigned char * data, size_t len) {
+        , finalSink([this](std::string_view data) {
             if (this->request.dataCallback) {
                 auto httpStatus = getHTTPStatus();

                 /* Only write data to the sink if this is a
                    successful response. */
                 if (successfulStatuses.count(httpStatus)) {
-                    writtenToSink += len;
-                    this->request.dataCallback((char *) data, len);
+                    writtenToSink += data.size();
+                    this->request.dataCallback(data);
                 }
             } else
-                this->result.data->append((char *) data, len);
+                this->result.data->append(data);
         })
     {
         if (!request.expectedETag.empty())

@@ -171,8 +171,8 @@ struct curlFileTransfer : public FileTransfer
         }

         if (errorSink)
-            (*errorSink)((unsigned char *) contents, realSize);
-        (*decompressionSink)((unsigned char *) contents, realSize);
+            (*errorSink)({(char *) contents, realSize});
+        (*decompressionSink)({(char *) contents, realSize});

         return realSize;
     } catch (...) {

@@ -776,7 +776,7 @@ void FileTransfer::download(FileTransferRequest && request, Sink & sink)
         state->request.notify_one();
     });

-    request.dataCallback = [_state](char * buf, size_t len) {
+    request.dataCallback = [_state](std::string_view data) {

         auto state(_state->lock());

@@ -794,7 +794,7 @@ void FileTransfer::download(FileTransferRequest && request, Sink & sink)

         /* Append data to the buffer and wake up the calling
            thread. */
-        state->data.append(buf, len);
+        state->data.append(data);
         state->avail.notify_one();
     };

@@ -840,7 +840,7 @@ void FileTransfer::download(FileTransferRequest && request, Sink & sink)
            if it's blocked on a full buffer. We don't hold the state
            lock while doing this to prevent blocking the download
            thread if sink() takes a long time. */
-        sink((unsigned char *) chunk.data(), chunk.size());
+        sink(chunk);
     }
 }

src/libstore/filetransfer.hh

@@ -61,7 +61,7 @@ struct FileTransferRequest
     bool decompress = true;
     std::shared_ptr<std::string> data;
     std::string mimeType;
-    std::function<void(char *, size_t)> dataCallback;
+    std::function<void(std::string_view data)> dataCallback;

     FileTransferRequest(const std::string & uri)
         : uri(uri), parentAct(getCurActivity()) { }
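All of the pointer-plus-length callbacks above collapse into std::string_view, which carries the pointer and size as one value and appends without casts. The two shapes side by side, as an illustrative sketch:

    #include <cstring>
    #include <functional>
    #include <iostream>
    #include <string>
    #include <string_view>

    int main()
    {
        std::string buffer;

        // Old shape: the callee must trust that len matches data.
        std::function<void(char *, size_t)> oldCb =
            [&](char * data, size_t len) { buffer.append(data, len); };

        // New shape: one argument, no casts, the size travels with the pointer.
        std::function<void(std::string_view)> newCb =
            [&](std::string_view data) { buffer.append(data); };

        char chunk[] = "hello";
        oldCb(chunk, strlen(chunk));
        newCb("world");
        std::cout << buffer << "\n"; // prints "helloworld"
    }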
src/libstore/gc.cc

@@ -683,7 +683,7 @@ void LocalStore::removeUnusedLinks(const GCState & state)
     struct stat st;
     if (stat(linksDir.c_str(), &st) == -1)
         throw SysError("statting '%1%'", linksDir);
-    auto overhead = st.st_blocks * 512ULL;
+    int64_t overhead = st.st_blocks * 512ULL;

     printInfo("note: currently hard linking saves %.2f MiB",
         ((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0)));
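Declared with auto, overhead picks up the unsignedness of 512ULL, and an unsigned operand can drag the savings expression into unsigned arithmetic, where a negative result wraps around to a huge positive value; pinning the variable to int64_t keeps the subtraction meaningful. A small demonstration of the wrap, with made-up numbers:

    #include <cstdint>
    #include <iostream>

    int main()
    {
        uint64_t saved = 100;          // bytes reclaimed by hard linking
        auto overheadU = 3 * 512ULL;   // unsigned, like the old code
        int64_t overheadS = 3 * 512;   // signed, like the fixed code

        // Unsigned subtraction wraps: prints a number near 2^64.
        std::cout << saved - overheadU << "\n";

        // Converting to a signed type up front keeps the sign.
        std::cout << (int64_t) saved - overheadS << "\n"; // prints -1436
    }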
src/libstore/globals.cc

@@ -86,6 +86,12 @@ void loadConfFile()
     for (auto file = files.rbegin(); file != files.rend(); file++) {
         globalConfig.applyConfigFile(*file);
     }
+
+    auto nixConfEnv = getEnv("NIX_CONFIG");
+    if (nixConfEnv.has_value()) {
+        globalConfig.applyConfig(nixConfEnv.value(), "NIX_CONFIG");
+    }
+
 }

 std::vector<Path> getUserConfigFiles()
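Since NIX_CONFIG is applied after every configuration file, anything set there takes precedence over nix.conf for that invocation. A toy model of that last-writer-wins ordering (hypothetical mini parser, not the real Config class):

    #include <cstdlib>
    #include <iostream>
    #include <map>
    #include <string>

    using Settings = std::map<std::string, std::string>;

    // Later applications overwrite earlier ones, as with applyConfig above.
    void apply(Settings & s, const std::string & contents)
    {
        auto eq = contents.find('=');
        if (eq == std::string::npos) return;
        s[contents.substr(0, eq)] = contents.substr(eq + 1);
    }

    int main()
    {
        Settings s;
        apply(s, "cores=4");             // from a config file
        if (const char * env = std::getenv("NIX_CONFIG"))
            apply(s, env);               // env var applied last, so it wins
        std::cout << "cores=" << s["cores"] << "\n";
    }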
src/libstore/globals.cc (continued)

@@ -154,7 +160,7 @@ NLOHMANN_JSON_SERIALIZE_ENUM(SandboxMode, {
     {SandboxMode::smDisabled, false},
 });

-template<> void BaseSetting<SandboxMode>::set(const std::string & str)
+template<> void BaseSetting<SandboxMode>::set(const std::string & str, bool append)
 {
     if (str == "true") value = smEnabled;
     else if (str == "relaxed") value = smRelaxed;

@@ -162,6 +168,11 @@ template<> void BaseSetting<SandboxMode>::set(const std::string & str)
     else throw UsageError("option '%s' has invalid value '%s'", name, str);
 }

+template<> bool BaseSetting<SandboxMode>::isAppendable()
+{
+    return false;
+}
+
 template<> std::string BaseSetting<SandboxMode>::to_string() const
 {
     if (value == smEnabled) return "true";

@@ -192,7 +203,7 @@ template<> void BaseSetting<SandboxMode>::convertToArg(Args & args, const std::s
     });
 }

-void MaxBuildJobsSetting::set(const std::string & str)
+void MaxBuildJobsSetting::set(const std::string & str, bool append)
 {
     if (str == "auto") value = std::max(1U, std::thread::hardware_concurrency());
     else if (!string2Int(str, value))

src/libstore/globals.hh

@@ -25,7 +25,7 @@ struct MaxBuildJobsSetting : public BaseSetting<unsigned int>
         options->addSetting(this);
     }

-    void set(const std::string & str) override;
+    void set(const std::string & str, bool append = false) override;
 };

 class Settings : public Config {

@@ -413,14 +413,6 @@ public:
     Setting<bool> sandboxFallback{this, true, "sandbox-fallback",
         "Whether to disable sandboxing when the kernel doesn't allow it."};

-    Setting<PathSet> extraSandboxPaths{
-        this, {}, "extra-sandbox-paths",
-        R"(
-          A list of additional paths appended to `sandbox-paths`. Useful if
-          you want to extend its default value.
-        )",
-        {"build-extra-chroot-dirs", "build-extra-sandbox-paths"}};
-
     Setting<size_t> buildRepeat{
         this, 0, "repeat",
         R"(

@@ -591,7 +583,7 @@ public:

     Setting<Strings> substituters{
         this,
-        nixStore == "/nix/store" ? Strings{"https://cache.nixos.org/"} : Strings(),
+        Strings{"https://cache.nixos.org/"},
         "substituters",
         R"(
          A list of URLs of substituters, separated by whitespace. The default

@@ -599,17 +591,6 @@ public:
        )",
        {"binary-caches"}};

-    // FIXME: provide a way to add to option values.
-    Setting<Strings> extraSubstituters{
-        this, {}, "extra-substituters",
-        R"(
-          Additional binary caches appended to those specified in
-          `substituters`. When used by unprivileged users, untrusted
-          substituters (i.e. those not listed in `trusted-substituters`) are
-          silently ignored.
-        )",
-        {"extra-binary-caches"}};
-
     Setting<StringSet> trustedSubstituters{
         this, {}, "trusted-substituters",
         R"(

@@ -886,7 +867,7 @@ public:
          Example `~/.config/nix/nix.conf`:

          ```
-          access-tokens = "github.com=23ac...b289 gitlab.mycompany.com=PAT:A123Bp_Cd..EfG gitlab.com=OAuth2:1jklw3jk"
+          access-tokens = github.com=23ac...b289 gitlab.mycompany.com=PAT:A123Bp_Cd..EfG gitlab.com=OAuth2:1jklw3jk
          ```

          Example `~/code/flake.nix`:
src/libstore/legacy-ssh-store.cc

@@ -333,6 +333,10 @@ public:
         auto conn(connections->get());
         return conn->remoteVersion;
     }

+    std::optional<const Realisation> queryRealisation(const DrvOutput&) override
+    // TODO: Implement
+    { unsupported("queryRealisation"); }
 };

 static RegisterStoreImplementation<LegacySSHStore, LegacySSHStoreConfig> regLegacySSHStore;
src/libstore/local-binary-cache-store.cc

@@ -87,6 +87,7 @@ protected:
 void LocalBinaryCacheStore::init()
 {
     createDirs(binaryCacheDir + "/nar");
+    createDirs(binaryCacheDir + realisationsPrefix);
     if (writeDebugInfo)
         createDirs(binaryCacheDir + "/debuginfo");
     BinaryCacheStore::init();
src/libstore/local-store.cc

@@ -7,6 +7,7 @@
 #include "nar-info.hh"
 #include "references.hh"
 #include "callback.hh"
+#include "topo-sort.hh"

 #include <iostream>
 #include <algorithm>

@@ -41,6 +42,61 @@

 namespace nix {

+struct LocalStore::State::Stmts {
+    /* Some precompiled SQLite statements. */
+    SQLiteStmt RegisterValidPath;
+    SQLiteStmt UpdatePathInfo;
+    SQLiteStmt AddReference;
+    SQLiteStmt QueryPathInfo;
+    SQLiteStmt QueryReferences;
+    SQLiteStmt QueryReferrers;
+    SQLiteStmt InvalidatePath;
+    SQLiteStmt AddDerivationOutput;
+    SQLiteStmt RegisterRealisedOutput;
+    SQLiteStmt QueryValidDerivers;
+    SQLiteStmt QueryDerivationOutputs;
+    SQLiteStmt QueryRealisedOutput;
+    SQLiteStmt QueryAllRealisedOutputs;
+    SQLiteStmt QueryPathFromHashPart;
+    SQLiteStmt QueryValidPaths;
+};
+
+int getSchema(Path schemaPath)
+{
+    int curSchema = 0;
+    if (pathExists(schemaPath)) {
+        string s = readFile(schemaPath);
+        if (!string2Int(s, curSchema))
+            throw Error("'%1%' is corrupt", schemaPath);
+    }
+    return curSchema;
+}
+
+void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd)
+{
+    const int nixCASchemaVersion = 1;
+    int curCASchema = getSchema(schemaPath);
+    if (curCASchema != nixCASchemaVersion) {
+        if (curCASchema > nixCASchemaVersion) {
+            throw Error("current Nix store ca-schema is version %1%, but I only support %2%",
+                 curCASchema, nixCASchemaVersion);
+        }
+
+        if (!lockFile(lockFd.get(), ltWrite, false)) {
+            printInfo("waiting for exclusive access to the Nix store for ca drvs...");
+            lockFile(lockFd.get(), ltWrite, true);
+        }
+
+        if (curCASchema == 0) {
+            static const char schema[] =
+              #include "ca-specific-schema.sql.gen.hh"
+                ;
+            db.exec(schema);
+        }
+        writeFile(schemaPath, fmt("%d", nixCASchemaVersion));
+        lockFile(lockFd.get(), ltRead, true);
+    }
+}
+
 LocalStore::LocalStore(const Params & params)
     : StoreConfig(params)
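migrateCASchema is the usual check-upgrade-record migration: read the version file (absent counts as version 0), refuse to run against a newer schema, take the global lock exclusively, apply the DDL once, then write the new version and fall back to a read lock. A compact sketch of just the versioning skeleton, with the locking and SQL elided:

    #include <fstream>
    #include <stdexcept>
    #include <string>

    int readVersion(const std::string & path)
    {
        std::ifstream f(path);
        int v = 0;
        if (f) f >> v;          // missing file counts as version 0
        return v;
    }

    void migrate(const std::string & versionFile, int supported)
    {
        int cur = readVersion(versionFile);
        if (cur == supported) return;               // nothing to do
        if (cur > supported)
            throw std::runtime_error("schema is newer than this binary supports");

        // ...acquire an exclusive lock and run the upgrade steps here...

        std::ofstream(versionFile) << supported;    // record success last
    }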
src/libstore/local-store.cc (continued)

@@ -59,6 +115,7 @@ LocalStore::LocalStore(const Params & params)
     , locksHeld(tokenizeString<PathSet>(getEnv("NIX_HELD_LOCKS").value_or("")))
 {
     auto state(_state.lock());
+    state->stmts = std::make_unique<State::Stmts>();

     /* Create missing state directories if they don't already exist. */
     createDirs(realStoreDir);

@@ -221,32 +278,58 @@ LocalStore::LocalStore(const Params & params)

     else openDB(*state, false);

+    if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
+        migrateCASchema(state->db, dbDir + "/ca-schema", globalLock);
+    }
+
     /* Prepare SQL statements. */
-    state->stmtRegisterValidPath.create(state->db,
+    state->stmts->RegisterValidPath.create(state->db,
         "insert into ValidPaths (path, hash, registrationTime, deriver, narSize, ultimate, sigs, ca) values (?, ?, ?, ?, ?, ?, ?, ?);");
-    state->stmtUpdatePathInfo.create(state->db,
+    state->stmts->UpdatePathInfo.create(state->db,
         "update ValidPaths set narSize = ?, hash = ?, ultimate = ?, sigs = ?, ca = ? where path = ?;");
-    state->stmtAddReference.create(state->db,
+    state->stmts->AddReference.create(state->db,
         "insert or replace into Refs (referrer, reference) values (?, ?);");
-    state->stmtQueryPathInfo.create(state->db,
+    state->stmts->QueryPathInfo.create(state->db,
         "select id, hash, registrationTime, deriver, narSize, ultimate, sigs, ca from ValidPaths where path = ?;");
-    state->stmtQueryReferences.create(state->db,
+    state->stmts->QueryReferences.create(state->db,
         "select path from Refs join ValidPaths on reference = id where referrer = ?;");
-    state->stmtQueryReferrers.create(state->db,
+    state->stmts->QueryReferrers.create(state->db,
         "select path from Refs join ValidPaths on referrer = id where reference = (select id from ValidPaths where path = ?);");
-    state->stmtInvalidatePath.create(state->db,
+    state->stmts->InvalidatePath.create(state->db,
         "delete from ValidPaths where path = ?;");
-    state->stmtAddDerivationOutput.create(state->db,
+    state->stmts->AddDerivationOutput.create(state->db,
         "insert or replace into DerivationOutputs (drv, id, path) values (?, ?, ?);");
-    state->stmtQueryValidDerivers.create(state->db,
+    state->stmts->QueryValidDerivers.create(state->db,
         "select v.id, v.path from DerivationOutputs d join ValidPaths v on d.drv = v.id where d.path = ?;");
-    state->stmtQueryDerivationOutputs.create(state->db,
+    state->stmts->QueryDerivationOutputs.create(state->db,
         "select id, path from DerivationOutputs where drv = ?;");
     // Use "path >= ?" with limit 1 rather than "path like '?%'" to
     // ensure efficient lookup.
-    state->stmtQueryPathFromHashPart.create(state->db,
+    state->stmts->QueryPathFromHashPart.create(state->db,
         "select path from ValidPaths where path >= ? limit 1;");
-    state->stmtQueryValidPaths.create(state->db, "select path from ValidPaths");
+    state->stmts->QueryValidPaths.create(state->db, "select path from ValidPaths");
+    if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
+        state->stmts->RegisterRealisedOutput.create(state->db,
+            R"(
+                insert or replace into Realisations (drvPath, outputName, outputPath)
+                values (?, ?, (select id from ValidPaths where path = ?))
+                ;
+            )");
+        state->stmts->QueryRealisedOutput.create(state->db,
+            R"(
+                select Output.path from Realisations
+                    inner join ValidPaths as Output on Output.id = Realisations.outputPath
+                    where drvPath = ? and outputName = ?
+                    ;
+            )");
+        state->stmts->QueryAllRealisedOutputs.create(state->db,
+            R"(
+                select outputName, Output.path from Realisations
+                    inner join ValidPaths as Output on Output.id = Realisations.outputPath
+                    where drvPath = ?
+                    ;
+            )");
+    }
 }
@@ -284,16 +367,7 @@ std::string LocalStore::getUri()


 int LocalStore::getSchema()
-{
-    int curSchema = 0;
-    if (pathExists(schemaPath)) {
-        string s = readFile(schemaPath);
-        if (!string2Int(s, curSchema))
-            throw Error("'%1%' is corrupt", schemaPath);
-    }
-    return curSchema;
-}
+{ return nix::getSchema(schemaPath); }


 void LocalStore::openDB(State & state, bool create)
 {

@@ -573,21 +647,29 @@ void LocalStore::checkDerivationOutputs(const StorePath & drvPath, const Derivat
         [&](DerivationOutputCAFloating _) {
             /* Nothing to check */
         },
+        [&](DerivationOutputDeferred) {
+        },
     }, i.second.output);
     }
 }

-void LocalStore::linkDeriverToPath(const StorePath & deriver, const string & outputName, const StorePath & output)
+void LocalStore::registerDrvOutput(const Realisation & info)
 {
     auto state(_state.lock());
-    return linkDeriverToPath(*state, queryValidPathId(*state, deriver), outputName, output);
+    retrySQLite<void>([&]() {
+        state->stmts->RegisterRealisedOutput.use()
+            (info.id.strHash())
+            (info.id.outputName)
+            (printStorePath(info.outPath))
+            .exec();
+    });
 }

-void LocalStore::linkDeriverToPath(State & state, uint64_t deriver, const string & outputName, const StorePath & output)
+void LocalStore::cacheDrvOutputMapping(State & state, const uint64_t deriver, const string & outputName, const StorePath & output)
 {
     retrySQLite<void>([&]() {
-        state.stmtAddDerivationOutput.use()
+        state.stmts->AddDerivationOutput.use()
            (deriver)
            (outputName)
            (printStorePath(output))

@@ -604,7 +686,7 @@ uint64_t LocalStore::addValidPath(State & state,
         throw Error("cannot add path '%s' to the Nix store because it claims to be content-addressed but isn't",
             printStorePath(info.path));

-    state.stmtRegisterValidPath.use()
+    state.stmts->RegisterValidPath.use()
         (printStorePath(info.path))
         (info.narHash.to_string(Base16, true))
         (info.registrationTime == 0 ? time(0) : info.registrationTime)

@@ -621,7 +703,7 @@ uint64_t LocalStore::addValidPath(State & state,
        efficiently query whether a path is an output of some
        derivation. */
     if (info.path.isDerivation()) {
-        auto drv = readDerivation(info.path);
+        auto drv = readInvalidDerivation(info.path);

         /* Verify that the output paths in the derivation are correct
            (i.e., follow the scheme for computing output paths from

@@ -634,7 +716,7 @@ uint64_t LocalStore::addValidPath(State & state,
             /* Floating CA derivations have indeterminate output paths until
                they are built, so don't register anything in that case */
             if (i.second.second)
-                linkDeriverToPath(state, id, i.first, *i.second.second);
+                cacheDrvOutputMapping(state, id, i.first, *i.second.second);
         }
     }

@@ -656,7 +738,7 @@ void LocalStore::queryPathInfoUncached(const StorePath & path,
         auto state(_state.lock());

         /* Get the path info. */
-        auto useQueryPathInfo(state->stmtQueryPathInfo.use()(printStorePath(path)));
+        auto useQueryPathInfo(state->stmts->QueryPathInfo.use()(printStorePath(path)));

         if (!useQueryPathInfo.next())
             return std::shared_ptr<ValidPathInfo>();

@@ -676,7 +758,7 @@ void LocalStore::queryPathInfoUncached(const StorePath & path,

         info->registrationTime = useQueryPathInfo.getInt(2);

-        auto s = (const char *) sqlite3_column_text(state->stmtQueryPathInfo, 3);
+        auto s = (const char *) sqlite3_column_text(state->stmts->QueryPathInfo, 3);
         if (s) info->deriver = parseStorePath(s);

         /* Note that narSize = NULL yields 0. */

@@ -684,14 +766,14 @@ void LocalStore::queryPathInfoUncached(const StorePath & path,

         info->ultimate = useQueryPathInfo.getInt(5) == 1;

-        s = (const char *) sqlite3_column_text(state->stmtQueryPathInfo, 6);
+        s = (const char *) sqlite3_column_text(state->stmts->QueryPathInfo, 6);
         if (s) info->sigs = tokenizeString<StringSet>(s, " ");

-        s = (const char *) sqlite3_column_text(state->stmtQueryPathInfo, 7);
+        s = (const char *) sqlite3_column_text(state->stmts->QueryPathInfo, 7);
         if (s) info->ca = parseContentAddressOpt(s);

         /* Get the references. */
-        auto useQueryReferences(state->stmtQueryReferences.use()(info->id));
+        auto useQueryReferences(state->stmts->QueryReferences.use()(info->id));

         while (useQueryReferences.next())
             info->references.insert(parseStorePath(useQueryReferences.getStr(0)));

@@ -706,7 +788,7 @@ void LocalStore::queryPathInfoUncached(const StorePath & path,
 /* Update path info in the database. */
 void LocalStore::updatePathInfo(State & state, const ValidPathInfo & info)
 {
-    state.stmtUpdatePathInfo.use()
+    state.stmts->UpdatePathInfo.use()
         (info.narSize, info.narSize != 0)
         (info.narHash.to_string(Base16, true))
         (info.ultimate ? 1 : 0, info.ultimate)

@@ -719,7 +801,7 @@ void LocalStore::updatePathInfo(State & state, const ValidPathInfo & info)

 uint64_t LocalStore::queryValidPathId(State & state, const StorePath & path)
 {
-    auto use(state.stmtQueryPathInfo.use()(printStorePath(path)));
+    auto use(state.stmts->QueryPathInfo.use()(printStorePath(path)));
     if (!use.next())
         throw InvalidPath("path '%s' is not valid", printStorePath(path));
     return use.getInt(0);

@@ -728,7 +810,7 @@ uint64_t LocalStore::queryValidPathId(State & state, const StorePath & path)

 bool LocalStore::isValidPath_(State & state, const StorePath & path)
 {
-    return state.stmtQueryPathInfo.use()(printStorePath(path)).next();
+    return state.stmts->QueryPathInfo.use()(printStorePath(path)).next();
 }


@@ -754,7 +836,7 @@ StorePathSet LocalStore::queryAllValidPaths()
 {
     return retrySQLite<StorePathSet>([&]() {
         auto state(_state.lock());
-        auto use(state->stmtQueryValidPaths.use());
+        auto use(state->stmts->QueryValidPaths.use());
         StorePathSet res;
         while (use.next()) res.insert(parseStorePath(use.getStr(0)));
         return res;

@@ -764,7 +846,7 @@ StorePathSet LocalStore::queryAllValidPaths()

 void LocalStore::queryReferrers(State & state, const StorePath & path, StorePathSet & referrers)
 {
-    auto useQueryReferrers(state.stmtQueryReferrers.use()(printStorePath(path)));
+    auto useQueryReferrers(state.stmts->QueryReferrers.use()(printStorePath(path)));

     while (useQueryReferrers.next())
         referrers.insert(parseStorePath(useQueryReferrers.getStr(0)));

@@ -785,7 +867,7 @@ StorePathSet LocalStore::queryValidDerivers(const StorePath & path)
     return retrySQLite<StorePathSet>([&]() {
         auto state(_state.lock());

-        auto useQueryValidDerivers(state->stmtQueryValidDerivers.use()(printStorePath(path)));
+        auto useQueryValidDerivers(state->stmts->QueryValidDerivers.use()(printStorePath(path)));

         StorePathSet derivers;
         while (useQueryValidDerivers.next())

@@ -796,69 +878,38 @@ StorePathSet LocalStore::queryValidDerivers(const StorePath & path)
 }


-std::map<std::string, std::optional<StorePath>> LocalStore::queryPartialDerivationOutputMap(const StorePath & path_)
+std::map<std::string, std::optional<StorePath>>
+LocalStore::queryDerivationOutputMapNoResolve(const StorePath& path_)
 {
     auto path = path_;
-    std::map<std::string, std::optional<StorePath>> outputs;
-    Derivation drv = readDerivation(path);
-    for (auto & [outName, _] : drv.outputs) {
-        outputs.insert_or_assign(outName, std::nullopt);
-    }
-    bool haveCached = false;
-    {
-        auto resolutions = drvPathResolutions.lock();
-        auto resolvedPathOptIter = resolutions->find(path);
-        if (resolvedPathOptIter != resolutions->end()) {
-            auto & [_, resolvedPathOpt] = *resolvedPathOptIter;
-            if (resolvedPathOpt)
-                path = *resolvedPathOpt;
-            haveCached = true;
-        }
-    }
-    /* can't just use else-if instead of `!haveCached` because we need to unlock
-       `drvPathResolutions` before it is locked in `Derivation::resolve`. */
-    if (!haveCached && drv.type() == DerivationType::CAFloating) {
-        /* Try resolve drv and use that path instead. */
-        auto attempt = drv.tryResolve(*this);
-        if (!attempt)
-            /* If we cannot resolve the derivation, we cannot have any path
-               assigned so we return the map of all std::nullopts. */
-            return outputs;
-        /* Just compute store path */
-        auto pathResolved = writeDerivation(*this, *std::move(attempt), NoRepair, true);
-        /* Store in memo table. */
-        /* FIXME: memo logic should not be local-store specific, should have
-           wrapper-method instead. */
-        drvPathResolutions.lock()->insert_or_assign(path, pathResolved);
-        path = std::move(pathResolved);
-    }
-    return retrySQLite<std::map<std::string, std::optional<StorePath>>>([&]() {
+    auto outputs = retrySQLite<std::map<std::string, std::optional<StorePath>>>([&]() {
         auto state(_state.lock());
+        std::map<std::string, std::optional<StorePath>> outputs;
         uint64_t drvId;
-        try {
-            drvId = queryValidPathId(*state, path);
-        } catch (InvalidPath &) {
-            /* FIXME? if the derivation doesn't exist, we cannot have a mapping
-               for it. */
-            return outputs;
-        }
-
-        auto useQueryDerivationOutputs {
-            state->stmtQueryDerivationOutputs.use()
-            (drvId)
-        };
-
-        while (useQueryDerivationOutputs.next())
+        drvId = queryValidPathId(*state, path);
+        auto use(state->stmts->QueryDerivationOutputs.use()(drvId));
+        while (use.next())
             outputs.insert_or_assign(
-                useQueryDerivationOutputs.getStr(0),
-                parseStorePath(useQueryDerivationOutputs.getStr(1))
-            );
+                use.getStr(0), parseStorePath(use.getStr(1)));

         return outputs;
     });
-}

+    if (!settings.isExperimentalFeatureEnabled("ca-derivations"))
+        return outputs;
+
+    auto drv = readInvalidDerivation(path);
+    auto drvHashes = staticOutputHashes(*this, drv);
+    for (auto& [outputName, hash] : drvHashes) {
+        auto realisation = queryRealisation(DrvOutput{hash, outputName});
+        if (realisation)
+            outputs.insert_or_assign(outputName, realisation->outPath);
+        else
+            outputs.insert_or_assign(outputName, std::nullopt);
+    }
+
+    return outputs;
+}

 std::optional<StorePath> LocalStore::queryPathFromHashPart(const std::string & hashPart)
 {
|
||||||
return retrySQLite<std::optional<StorePath>>([&]() -> std::optional<StorePath> {
|
return retrySQLite<std::optional<StorePath>>([&]() -> std::optional<StorePath> {
|
||||||
auto state(_state.lock());
|
auto state(_state.lock());
|
||||||
|
|
||||||
auto useQueryPathFromHashPart(state->stmtQueryPathFromHashPart.use()(prefix));
|
auto useQueryPathFromHashPart(state->stmts->QueryPathFromHashPart.use()(prefix));
|
||||||
|
|
||||||
if (!useQueryPathFromHashPart.next()) return {};
|
if (!useQueryPathFromHashPart.next()) return {};
|
||||||
|
|
||||||
const char * s = (const char *) sqlite3_column_text(state->stmtQueryPathFromHashPart, 0);
|
const char * s = (const char *) sqlite3_column_text(state->stmts->QueryPathFromHashPart, 0);
|
||||||
if (s && prefix.compare(0, prefix.size(), s, prefix.size()) == 0)
|
if (s && prefix.compare(0, prefix.size(), s, prefix.size()) == 0)
|
||||||
return parseStorePath(s);
|
return parseStorePath(s);
|
||||||
return {};
|
return {};
|
||||||
|
@ -957,9 +1008,7 @@ void LocalStore::querySubstitutablePathInfos(const StorePathCAMap & paths, Subst
|
||||||
|
|
||||||
void LocalStore::registerValidPath(const ValidPathInfo & info)
|
void LocalStore::registerValidPath(const ValidPathInfo & info)
|
||||||
{
|
{
|
||||||
ValidPathInfos infos;
|
registerValidPaths({{info.path, info}});
|
||||||
infos.push_back(info);
|
|
||||||
registerValidPaths(infos);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -977,7 +1026,7 @@ void LocalStore::registerValidPaths(const ValidPathInfos & infos)
|
||||||
SQLiteTxn txn(state->db);
|
SQLiteTxn txn(state->db);
|
||||||
StorePathSet paths;
|
StorePathSet paths;
|
||||||
|
|
||||||
for (auto & i : infos) {
|
for (auto & [_, i] : infos) {
|
||||||
assert(i.narHash.type == htSHA256);
|
assert(i.narHash.type == htSHA256);
|
||||||
if (isValidPath_(*state, i.path))
|
if (isValidPath_(*state, i.path))
|
||||||
updatePathInfo(*state, i);
|
updatePathInfo(*state, i);
|
||||||
|
@ -986,26 +1035,37 @@ void LocalStore::registerValidPaths(const ValidPathInfos & infos)
|
||||||
paths.insert(i.path);
|
paths.insert(i.path);
|
||||||
}
|
}
|
||||||
|
|
||||||
for (auto & i : infos) {
|
for (auto & [_, i] : infos) {
|
||||||
auto referrer = queryValidPathId(*state, i.path);
|
auto referrer = queryValidPathId(*state, i.path);
|
||||||
for (auto & j : i.references)
|
for (auto & j : i.references)
|
||||||
state->stmtAddReference.use()(referrer)(queryValidPathId(*state, j)).exec();
|
state->stmts->AddReference.use()(referrer)(queryValidPathId(*state, j)).exec();
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Check that the derivation outputs are correct. We can't do
|
/* Check that the derivation outputs are correct. We can't do
|
||||||
this in addValidPath() above, because the references might
|
this in addValidPath() above, because the references might
|
||||||
not be valid yet. */
|
not be valid yet. */
|
||||||
for (auto & i : infos)
|
for (auto & [_, i] : infos)
|
||||||
if (i.path.isDerivation()) {
|
if (i.path.isDerivation()) {
|
||||||
// FIXME: inefficient; we already loaded the derivation in addValidPath().
|
// FIXME: inefficient; we already loaded the derivation in addValidPath().
|
||||||
checkDerivationOutputs(i.path, readDerivation(i.path));
|
checkDerivationOutputs(i.path,
|
||||||
|
readInvalidDerivation(i.path));
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Do a topological sort of the paths. This will throw an
|
/* Do a topological sort of the paths. This will throw an
|
||||||
error if a cycle is detected and roll back the
|
error if a cycle is detected and roll back the
|
||||||
transaction. Cycles can only occur when a derivation
|
transaction. Cycles can only occur when a derivation
|
||||||
has multiple outputs. */
|
has multiple outputs. */
|
||||||
topoSortPaths(paths);
|
topoSort(paths,
|
||||||
|
{[&](const StorePath & path) {
|
||||||
|
auto i = infos.find(path);
|
||||||
|
return i == infos.end() ? StorePathSet() : i->second.references;
|
||||||
|
}},
|
||||||
|
{[&](const StorePath & path, const StorePath & parent) {
|
||||||
|
return BuildError(
|
||||||
|
"cycle detected in the references of '%s' from '%s'",
|
||||||
|
printStorePath(path),
|
||||||
|
printStorePath(parent));
|
||||||
|
}});
|
||||||
|
|
||||||
txn.commit();
|
txn.commit();
|
||||||
});
|
});
|
||||||
|
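topoSortPaths gives way to a generic topoSort parameterised by an edge function and a cycle-error factory, so one routine can serve store paths here and other node types elsewhere. A self-contained depth-first version of that interface (simplified types; the real implementation lives in topo-sort.hh):

    #include <functional>
    #include <set>
    #include <stdexcept>
    #include <string>
    #include <vector>

    template<typename T>
    std::vector<T> topoSort(
        const std::set<T> & items,
        std::function<std::set<T>(const T &)> getEdges,
        std::function<std::runtime_error(const T &, const T &)> mkCycleError)
    {
        std::vector<T> sorted;
        std::set<T> visited, parents;

        std::function<void(const T &, const T *)> visit =
            [&](const T & node, const T * parent) {
            if (parents.count(node))
                throw mkCycleError(node, *parent);   // back edge = cycle
            if (!visited.insert(node).second) return;
            parents.insert(node);
            for (auto & ref : getEdges(node))
                if (items.count(ref)) visit(ref, &node);
            parents.erase(node);
            sorted.push_back(node);                  // post-order: deps first
        };

        for (auto & i : items) visit(i, nullptr);
        return sorted;
    }

    int main()
    {
        std::set<std::string> items{"a", "b", "c"};
        auto edges = [](const std::string & s) {
            return s == "a" ? std::set<std::string>{"b"} : std::set<std::string>{};
        };
        auto err = [](const std::string & n, const std::string & p) {
            return std::runtime_error("cycle: " + n + " from " + p);
        };
        auto order = topoSort<std::string>(items, edges, err);
        return order.front() == "b" ? 0 : 1;  // dependencies come first
    }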
@@ -1018,7 +1078,7 @@ void LocalStore::invalidatePath(State & state, const StorePath & path)
 {
     debug("invalidating path '%s'", printStorePath(path));

-    state.stmtInvalidatePath.use()(printStorePath(path)).exec();
+    state.stmts->InvalidatePath.use()(printStorePath(path)).exec();

     /* Note that the foreign key constraints on the Refs table take
        care of deleting the references entries for `path'. */

@@ -1083,11 +1143,11 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
         auto hashResult = hashSink->finish();

         if (hashResult.first != info.narHash)
-            throw Error("hash mismatch importing path '%s';\n  wanted: %s\n  got: %s",
+            throw Error("hash mismatch importing path '%s';\n  specified: %s\n  got: %s",
                 printStorePath(info.path), info.narHash.to_string(Base32, true), hashResult.first.to_string(Base32, true));

         if (hashResult.second != info.narSize)
-            throw Error("size mismatch importing path '%s';\n  wanted: %s\n  got: %s",
+            throw Error("size mismatch importing path '%s';\n  specified: %s\n  got: %s",
                 printStorePath(info.path), info.narSize, hashResult.second);

         autoGC();

@@ -1131,7 +1191,7 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, const string & name,
                 dump.resize(oldSize + want);
                 auto got = 0;
                 try {
-                    got = source.read((uint8_t *) dump.data() + oldSize, want);
+                    got = source.read(dump.data() + oldSize, want);
                 } catch (EndOfFile &) {
                     inMemory = true;
                     break;

@@ -1584,5 +1644,18 @@ void LocalStore::createUser(const std::string & userName, uid_t userId)
     }
 }

+std::optional<const Realisation> LocalStore::queryRealisation(
+    const DrvOutput& id) {
+    typedef std::optional<const Realisation> Ret;
+    return retrySQLite<Ret>([&]() -> Ret {
+        auto state(_state.lock());
+        auto use(state->stmts->QueryRealisedOutput.use()(id.strHash())(
+            id.outputName));
+        if (!use.next())
+            return std::nullopt;
+        auto outputPath = parseStorePath(use.getStr(0));
+        return Ret{
+            Realisation{.id = id, .outPath = outputPath}};
+    });
 }
+}  // namespace nix
src/libstore/local-store.hh

@@ -55,19 +55,8 @@ private:
     /* The SQLite database object. */
     SQLite db;

-    /* Some precompiled SQLite statements. */
-    SQLiteStmt stmtRegisterValidPath;
-    SQLiteStmt stmtUpdatePathInfo;
-    SQLiteStmt stmtAddReference;
-    SQLiteStmt stmtQueryPathInfo;
-    SQLiteStmt stmtQueryReferences;
-    SQLiteStmt stmtQueryReferrers;
-    SQLiteStmt stmtInvalidatePath;
-    SQLiteStmt stmtAddDerivationOutput;
-    SQLiteStmt stmtQueryValidDerivers;
-    SQLiteStmt stmtQueryDerivationOutputs;
-    SQLiteStmt stmtQueryPathFromHashPart;
-    SQLiteStmt stmtQueryValidPaths;
+    struct Stmts;
+    std::unique_ptr<Stmts> stmts;

     /* The file to which we write our temporary roots. */
     AutoCloseFD fdTempRoots;
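Collapsing the twelve SQLiteStmt members into a forward-declared Stmts behind a unique_ptr is a pimpl: new statements, such as the Realisations ones above, can now be added in local-store.cc without touching this header or recompiling everything that includes it. The pattern in miniature (illustrative names):

    // widget.hh -- the header never names the members.
    #include <memory>

    class Widget {
    public:
        Widget();
        ~Widget();            // must be defined where Impl is complete
        int count() const;
    private:
        struct Impl;          // forward declaration only
        std::unique_ptr<Impl> impl;
    };

    // widget.cc -- members can change freely without touching the header.
    struct Widget::Impl { int counter = 0; };

    Widget::Widget() : impl(std::make_unique<Impl>()) {}
    Widget::~Widget() = default;
    int Widget::count() const { return impl->counter; }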
src/libstore/local-store.hh (continued)

@@ -90,7 +79,7 @@ private:
         std::unique_ptr<PublicKeys> publicKeys;
     };

-    Sync<State, std::recursive_mutex> _state;
+    Sync<State> _state;

 public:


@@ -138,7 +127,7 @@ public:

     StorePathSet queryValidDerivers(const StorePath & path) override;

-    std::map<std::string, std::optional<StorePath>> queryPartialDerivationOutputMap(const StorePath & path) override;
+    std::map<std::string, std::optional<StorePath>> queryDerivationOutputMapNoResolve(const StorePath & path) override;

     std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;


@@ -219,6 +208,13 @@ public:
        garbage until it exceeds maxFree. */
     void autoGC(bool sync = true);

+    /* Register the store path 'output' as the output named 'outputName' of
+       derivation 'deriver'. */
+    void registerDrvOutput(const Realisation & info) override;
+    void cacheDrvOutputMapping(State & state, const uint64_t deriver, const string & outputName, const StorePath & output);
+
+    std::optional<const Realisation> queryRealisation(const DrvOutput&) override;
+
 private:

     int getSchema();

@@ -287,17 +283,12 @@ private:
        specified by the ‘secret-key-files’ option. */
     void signPathInfo(ValidPathInfo & info);

-    /* Register the store path 'output' as the output named 'outputName' of
-       derivation 'deriver'. */
-    void linkDeriverToPath(const StorePath & deriver, const string & outputName, const StorePath & output);
-    void linkDeriverToPath(State & state, uint64_t deriver, const string & outputName, const StorePath & output);
-
     Path getRealStoreDir() override { return realStoreDir; }

     void createUser(const std::string & userName, uid_t userId) override;

-    friend class DerivationGoal;
-    friend class SubstitutionGoal;
+    friend struct DerivationGoal;
+    friend struct SubstitutionGoal;
 };
src/libstore/local.mk

@@ -4,7 +4,7 @@ libstore_NAME = libnixstore

 libstore_DIR := $(d)

-libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc)
+libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc $(d)/build/*.cc)

 libstore_LIBS = libutil

@@ -32,7 +32,7 @@ ifeq ($(HAVE_SECCOMP), 1)
 endif

 libstore_CXXFLAGS += \
- -I src/libutil -I src/libstore \
+ -I src/libutil -I src/libstore -I src/libstore/build \
  -DNIX_PREFIX=\"$(prefix)\" \
  -DNIX_STORE_DIR=\"$(storedir)\" \
  -DNIX_DATA_DIR=\"$(datadir)\" \

@@ -48,7 +48,7 @@ ifneq ($(sandbox_shell),)
 libstore_CXXFLAGS += -DSANDBOX_SHELL="\"$(sandbox_shell)\""
 endif

-$(d)/local-store.cc: $(d)/schema.sql.gen.hh
+$(d)/local-store.cc: $(d)/schema.sql.gen.hh $(d)/ca-specific-schema.sql.gen.hh

 $(d)/build.cc:

@@ -58,9 +58,12 @@ $(d)/build.cc:
 	@echo ')foo"' >> $@.tmp
 	@mv $@.tmp $@

-clean-files += $(d)/schema.sql.gen.hh
+clean-files += $(d)/schema.sql.gen.hh $(d)/ca-specific-schema.sql.gen.hh

 $(eval $(call install-file-in, $(d)/nix-store.pc, $(prefix)/lib/pkgconfig, 0644))

 $(foreach i, $(wildcard src/libstore/builtins/*.hh), \
   $(eval $(call install-file-in, $(i), $(includedir)/nix/builtins, 0644)))
+
+$(foreach i, $(wildcard src/libstore/build/*.hh), \
+  $(eval $(call install-file-in, $(i), $(includedir)/nix/build, 0644)))
src/libstore/lock.cc (new file, 93 lines)

#include "lock.hh"
#include "globals.hh"
#include "pathlocks.hh"

#include <grp.h>
#include <pwd.h>

#include <fcntl.h>
#include <unistd.h>

namespace nix {

UserLock::UserLock()
{
    assert(settings.buildUsersGroup != "");
    createDirs(settings.nixStateDir + "/userpool");
}

bool UserLock::findFreeUser() {
    if (enabled()) return true;

    /* Get the members of the build-users-group. */
    struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
    if (!gr)
        throw Error("the group '%1%' specified in 'build-users-group' does not exist",
            settings.buildUsersGroup);
    gid = gr->gr_gid;

    /* Copy the result of getgrnam. */
    Strings users;
    for (char * * p = gr->gr_mem; *p; ++p) {
        debug("found build user '%1%'", *p);
        users.push_back(*p);
    }

    if (users.empty())
        throw Error("the build users group '%1%' has no members",
            settings.buildUsersGroup);

    /* Find a user account that isn't currently in use for another
       build. */
    for (auto & i : users) {
        debug("trying user '%1%'", i);

        struct passwd * pw = getpwnam(i.c_str());
        if (!pw)
            throw Error("the user '%1%' in the group '%2%' does not exist",
                i, settings.buildUsersGroup);

        fnUserLock = (format("%1%/userpool/%2%") % settings.nixStateDir % pw->pw_uid).str();

        AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
        if (!fd)
            throw SysError("opening user lock '%1%'", fnUserLock);

        if (lockFile(fd.get(), ltWrite, false)) {
            fdUserLock = std::move(fd);
            user = i;
            uid = pw->pw_uid;

            /* Sanity check... */
            if (uid == getuid() || uid == geteuid())
                throw Error("the Nix user should not be a member of '%1%'",
                    settings.buildUsersGroup);

#if __linux__
            /* Get the list of supplementary groups of this build user. This
               is usually either empty or contains a group such as "kvm". */
            supplementaryGIDs.resize(10);
            int ngroups = supplementaryGIDs.size();
            int err = getgrouplist(pw->pw_name, pw->pw_gid,
                supplementaryGIDs.data(), &ngroups);
            if (err == -1)
                throw Error("failed to get list of supplementary groups for '%1%'", pw->pw_name);

            supplementaryGIDs.resize(ngroups);
#endif

            isEnabled = true;
            return true;
        }
    }

    return false;
}

void UserLock::kill()
{
    killUser(uid);
}

}
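Aside: the user selection above hinges on a non-blocking exclusive lock on a per-UID file, taken through Nix's lockFile helper; whoever holds the lock owns that build user. A standalone sketch of the same acquire-or-skip pattern using raw POSIX fcntl locks (the path and function name are illustrative, not part of this diff):

#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

// Illustrative stand-in for lockFile(fd, ltWrite, false): try to take an
// exclusive, non-blocking lock on 'path'; return the fd on success, or -1
// if another process already holds the lock.
static int tryAcquireLock(const char * path)
{
    int fd = open(path, O_RDWR | O_CREAT | O_CLOEXEC, 0600);
    if (fd < 0) return -1;
    struct flock fl {};
    fl.l_type = F_WRLCK;     // exclusive write lock
    fl.l_whence = SEEK_SET;  // l_start = l_len = 0: lock the whole file
    if (fcntl(fd, F_SETLK, &fl) == -1) {  // F_SETLK fails instead of blocking
        close(fd);
        return -1;
    }
    return fd;  // released when the fd is closed, like AutoCloseFD
}

int main()
{
    int fd = tryAcquireLock("/tmp/userpool-demo.lock");
    std::printf(fd >= 0 ? "acquired\n" : "busy\n");
    if (fd >= 0) close(fd);
    return 0;
}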
src/libstore/lock.hh (new file, 37 lines)

#pragma once

#include "sync.hh"
#include "types.hh"
#include "util.hh"

namespace nix {

class UserLock
{
private:
    Path fnUserLock;
    AutoCloseFD fdUserLock;

    bool isEnabled = false;
    string user;
    uid_t uid = 0;
    gid_t gid = 0;
    std::vector<gid_t> supplementaryGIDs;

public:
    UserLock();

    void kill();

    string getUser() { return user; }
    uid_t getUID() { assert(uid); return uid; }
    uid_t getGID() { assert(gid); return gid; }
    std::vector<gid_t> getSupplementaryGIDs() { return supplementaryGIDs; }

    bool findFreeUser();

    bool enabled() { return isEnabled; }

};

}
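For orientation, a hedged sketch of how a caller might drive this interface; the function below is hypothetical (in this branch the build goals own the UserLock), and real code would check the return values of the set*id calls:

#include <grp.h>     // setgroups
#include <unistd.h>  // setgid, setuid

// Hypothetical caller: claim a free build user, then drop privileges to it.
void becomeBuildUser(UserLock & userLock)
{
    if (!userLock.findFreeUser())
        return;  // no free user; the build loop is expected to wait and retry

    auto gids = userLock.getSupplementaryGIDs();
    setgroups(gids.size(), gids.data());
    setgid(userLock.getGID());
    setuid(userLock.getUID());
}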
@@ -87,7 +87,7 @@ struct NarAccessor : public FSAccessor
         parents.top()->start = pos;
     }
 
-    void receiveContents(unsigned char * data, size_t len) override
+    void receiveContents(std::string_view data) override
     { }
 
     void createSymlink(const Path & path, const string & target) override
@@ -96,7 +96,7 @@ struct NarAccessor : public FSAccessor
             NarMember{FSAccessor::Type::tSymlink, false, 0, 0, target});
     }
 
-    size_t read(unsigned char * data, size_t len) override
+    size_t read(char * data, size_t len) override
     {
         auto n = source.read(data, len);
         pos += n;
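Several hunks in this diff are the same mechanical migration: sink-style interfaces move from a (const unsigned char *, size_t) pair to a single std::string_view. A minimal sketch of a sink written against the new shape (ByteCounter is an invented name, not part of this diff):

#include <cstddef>
#include <string_view>

// Invented example: a sink that just counts the bytes fed to it.
struct ByteCounter
{
    size_t length = 0;

    void operator () (std::string_view data)
    {
        length += data.size();  // one argument instead of a pointer/length pair
    }
};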
@@ -107,6 +107,6 @@ struct ValidPathInfo
     virtual ~ValidPathInfo() { }
 };
 
-typedef list<ValidPathInfo> ValidPathInfos;
+typedef std::map<StorePath, ValidPathInfo> ValidPathInfos;
 
 }
src/libstore/realisation.cc (new file, 49 lines)

#include "realisation.hh"
#include "store-api.hh"
#include <nlohmann/json.hpp>

namespace nix {

MakeError(InvalidDerivationOutputId, Error);

DrvOutput DrvOutput::parse(const std::string &strRep) {
    size_t n = strRep.find("!");
    if (n == strRep.npos)
        throw InvalidDerivationOutputId("Invalid derivation output id %s", strRep);

    return DrvOutput{
        .drvHash = Hash::parseAnyPrefixed(strRep.substr(0, n)),
        .outputName = strRep.substr(n+1),
    };
}

std::string DrvOutput::to_string() const {
    return strHash() + "!" + outputName;
}

nlohmann::json Realisation::toJSON() const {
    return nlohmann::json{
        {"id", id.to_string()},
        {"outPath", outPath.to_string()},
    };
}

Realisation Realisation::fromJSON(
    const nlohmann::json& json,
    const std::string& whence) {
    auto getField = [&](std::string fieldName) -> std::string {
        auto fieldIterator = json.find(fieldName);
        if (fieldIterator == json.end())
            throw Error(
                "Drv output info file '%1%' is corrupt, missing field %2%",
                whence, fieldName);
        return *fieldIterator;
    };

    return Realisation{
        .id = DrvOutput::parse(getField("id")),
        .outPath = StorePath(getField("outPath")),
    };
}

} // namespace nix
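The textual form of a derivation output id is `<drv hash>!<output name>`, so parse and to_string round-trip. A hedged sketch (the hash literal below is a placeholder, not a real store hash):

#include <cassert>

// Snippet only: assumes a valid prefixed hash string; the value is made up.
DrvOutput out = DrvOutput::parse(
    "sha256:1b4sb93wp679q4zx9k1ignby1yna3z7c4c2ri3wphylbc2dwsys0!out");
assert(out.outputName == "out");
assert(DrvOutput::parse(out.to_string()) == out);  // round-trips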
src/libstore/realisation.hh (new file, 39 lines)

#pragma once

#include "path.hh"
#include <nlohmann/json_fwd.hpp>

namespace nix {

struct DrvOutput {
    // The hash modulo of the derivation
    Hash drvHash;
    std::string outputName;

    std::string to_string() const;

    std::string strHash() const
    { return drvHash.to_string(Base16, true); }

    static DrvOutput parse(const std::string &);

    bool operator<(const DrvOutput& other) const { return to_pair() < other.to_pair(); }
    bool operator==(const DrvOutput& other) const { return to_pair() == other.to_pair(); }

private:
    // Just to make comparison operators easier to write
    std::pair<Hash, std::string> to_pair() const
    { return std::make_pair(drvHash, outputName); }
};

struct Realisation {
    DrvOutput id;
    StorePath outPath;

    nlohmann::json toJSON() const;
    static Realisation fromJSON(const nlohmann::json& json, const std::string& whence);
};

typedef std::map<DrvOutput, Realisation> DrvOutputs;

}
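Because DrvOutput defines operator<, it can key the DrvOutputs map directly. A small hedged usage sketch ('id' and 'path' are assumed to be in scope; the snippet is hypothetical, not from this diff):

// Record a realisation, then look it up by its output id.
DrvOutputs known;
Realisation r { .id = id, .outPath = path };
known.emplace(r.id, r);

if (auto it = known.find(id); it != known.end())
    debug("realised to '%1%'", it->second.outPath.to_string());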
@@ -55,25 +55,21 @@ struct RefScanSink : Sink
 
     RefScanSink() { }
 
-    void operator () (const unsigned char * data, size_t len);
-};
-
-
-void RefScanSink::operator () (const unsigned char * data, size_t len)
-{
-    /* It's possible that a reference spans the previous and current
-       fragment, so search in the concatenation of the tail of the
-       previous fragment and the start of the current fragment. */
-    string s = tail + string((const char *) data, len > refLength ? refLength : len);
-    search((const unsigned char *) s.data(), s.size(), hashes, seen);
-
-    search(data, len, hashes, seen);
-
-    size_t tailLen = len <= refLength ? len : refLength;
-    tail =
-        string(tail, tail.size() < refLength - tailLen ? 0 : tail.size() - (refLength - tailLen)) +
-        string((const char *) data + len - tailLen, tailLen);
-}
+    void operator () (std::string_view data) override
+    {
+        /* It's possible that a reference spans the previous and current
+           fragment, so search in the concatenation of the tail of the
+           previous fragment and the start of the current fragment. */
+        string s = tail + std::string(data, 0, refLength);
+        search((const unsigned char *) s.data(), s.size(), hashes, seen);
+
+        search((const unsigned char *) data.data(), data.size(), hashes, seen);
+
+        size_t tailLen = data.size() <= refLength ? data.size() : refLength;
+        tail = std::string(tail, tail.size() < refLength - tailLen ? 0 : tail.size() - (refLength - tailLen));
+        tail.append({data.data() + data.size() - tailLen, tailLen});
+    }
+};
 
 
 std::pair<PathSet, HashResult> scanForReferences(const string & path,
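The subtlety RefScanSink handles is that a hash can straddle two chunks, so each call scans the previous tail glued to the front of the new data, then keeps the last refLength bytes as the next tail. The same windowing idea in isolation (TokenScanner and the fixed-length token are illustrative; it assumes a non-empty token):

#include <algorithm>
#include <string>
#include <string_view>

// Illustrative: detect a fixed-length token in a chunked stream, even when
// the token straddles a chunk boundary.
struct TokenScanner
{
    std::string token;   // the non-empty needle, e.g. a 32-character hash
    std::string tail;    // at most token.size() - 1 trailing bytes seen so far
    bool found = false;

    void feed(std::string_view data)
    {
        std::string window = tail + std::string(data);
        if (window.find(token) != std::string::npos) found = true;
        // Keep just enough suffix that a split match can still be completed.
        size_t keep = std::min(window.size(), token.size() - 1);
        tail = window.substr(window.size() - keep);
    }
};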
@@ -129,10 +125,10 @@ RewritingSink::RewritingSink(const std::string & from, const std::string & to, S
     assert(from.size() == to.size());
 }
 
-void RewritingSink::operator () (const unsigned char * data, size_t len)
+void RewritingSink::operator () (std::string_view data)
 {
     std::string s(prev);
-    s.append((const char *) data, len);
+    s.append(data);
 
     size_t j = 0;
     while ((j = s.find(from, j)) != string::npos) {
@@ -146,14 +142,14 @@ void RewritingSink::operator () (const unsigned char * data, size_t len)
 
     pos += consumed;
 
-    if (consumed) nextSink((unsigned char *) s.data(), consumed);
+    if (consumed) nextSink(s.substr(0, consumed));
 }
 
 void RewritingSink::flush()
 {
     if (prev.empty()) return;
     pos += prev.size();
-    nextSink((unsigned char *) prev.data(), prev.size());
+    nextSink(prev);
     prev.clear();
 }
 
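RewritingSink solves the same boundary problem for replacement: it buffers enough of the stream that a 'from' string split across calls is still rewritten before bytes reach nextSink. A reduced sketch of the idea (ChunkRewriter is illustrative and, like the real sink, assumes 'from' and 'to' have equal, non-zero length):

#include <algorithm>
#include <functional>
#include <string>
#include <string_view>

// Illustrative chunk rewriter: replaces 'from' with the equal-length 'to',
// forwarding bytes only once they can no longer be part of a split match.
struct ChunkRewriter
{
    std::string from, to, pending;
    std::function<void(std::string_view)> next;

    void operator () (std::string_view data)
    {
        std::string s = pending + std::string(data);
        size_t j = 0;
        while ((j = s.find(from, j)) != std::string::npos) {
            s.replace(j, from.size(), to);
            j += to.size();
        }
        // Hold back a suffix that could be the start of a split match.
        size_t keep = std::min(s.size(), from.size() - 1);
        next(std::string_view(s).substr(0, s.size() - keep));
        pending = s.substr(s.size() - keep);
    }

    void flush() { if (!pending.empty()) { next(pending); pending.clear(); } }
};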
@@ -163,9 +159,9 @@ HashModuloSink::HashModuloSink(HashType ht, const std::string & modulus)
 {
 }
 
-void HashModuloSink::operator () (const unsigned char * data, size_t len)
+void HashModuloSink::operator () (std::string_view data)
 {
-    rewritingSink(data, len);
+    rewritingSink(data);
 }
 
 HashResult HashModuloSink::finish()
@@ -176,10 +172,8 @@ HashResult HashModuloSink::finish()
        NAR with self-references and a NAR with some of the
        self-references already zeroed out do not produce a hash
        collision. FIXME: proof. */
-    for (auto & pos : rewritingSink.matches) {
-        auto s = fmt("|%d", pos);
-        hashSink((unsigned char *) s.data(), s.size());
-    }
+    for (auto & pos : rewritingSink.matches)
+        hashSink(fmt("|%d", pos));
 
     auto h = hashSink.finish();
     return {h.first, rewritingSink.pos};
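The finish step above appends, after the zeroed stream, a `|<offset>` marker for each self-reference that was blanked out, so a NAR with self-references and one with those bytes already zeroed cannot hash alike. A standalone sketch of the trailer it feeds to the hash (hashModuloTrailer is an invented name; the offsets would come from rewritingSink.matches):

#include <cstdio>
#include <string>
#include <vector>

// Illustrative: build the "|offset" trailer that is hashed after the
// rewritten stream, one marker per replaced self-reference.
std::string hashModuloTrailer(const std::vector<size_t> & matches)
{
    std::string trailer;
    for (auto pos : matches)
        trailer += "|" + std::to_string(pos);
    return trailer;
}

int main()
{
    std::printf("%s\n", hashModuloTrailer({12, 340}).c_str());  // prints |12|340
    return 0;
}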
@@ -19,7 +19,7 @@ struct RewritingSink : Sink
 
     RewritingSink(const std::string & from, const std::string & to, Sink & nextSink);
 
-    void operator () (const unsigned char * data, size_t len) override;
+    void operator () (std::string_view data) override;
 
     void flush();
 };
@@ -31,7 +31,7 @@ struct HashModuloSink : AbstractHashSink
 
     HashModuloSink(HashType ht, const std::string & modulus);
 
-    void operator () (const unsigned char * data, size_t len) override;
+    void operator () (std::string_view data) override;
 
     HashResult finish() override;
 };
@@ -75,7 +75,7 @@ std::pair<ref<FSAccessor>, Path> RemoteFSAccessor::fetch(const Path & path_)
             throw SysError("seeking in '%s'", cacheFile);
 
         std::string buf(length, 0);
-        readFull(fd.get(), (unsigned char *) buf.data(), length);
+        readFull(fd.get(), buf.data(), length);
 
         return buf;
     });