Merge branch 'path-info' into ca-drv-exotic

John Ericson 2022-03-10 16:20:01 +00:00
commit 938650700f
427 changed files with 22834 additions and 36076 deletions


@ -0,0 +1,7 @@
**Release Notes**
Please include relevant [release notes](https://github.com/NixOS/nix/blob/master/doc/manual/src/release-notes/rl-next.md) as needed.
**Testing**
If this issue is a regression or something that should block release, please consider including a test either in the [testsuite](https://github.com/NixOS/nix/tree/master/tests) or as a [hydraJob]( https://github.com/NixOS/nix/blob/master/flake.nix#L396) so that it can be part of the [automatic checks](https://hydra.nixos.org/jobset/nix/master).
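For reference, functional tests in the [testsuite](https://github.com/NixOS/nix/tree/master/tests) are small shell scripts registered in `tests/local.mk`. A hypothetical sketch of such a test (the file name and the fixture used are illustrative; follow the conventions of the existing tests):

```bash
# tests/my-feature.sh (hypothetical name)
source common.sh   # shared helpers from the testsuite, e.g. clearStore

clearStore

# Build an existing fixture expression and check that an output path was produced.
outPath=$(nix-build dependencies.nix --no-out-link)
test -e "$outPath"
```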

.github/workflows/backport.yml (new file)

@ -0,0 +1,26 @@
name: Backport
on:
pull_request_target:
types: [closed, labeled]
jobs:
backport:
name: Backport Pull Request
if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith('backport', github.event.label.name))
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
# required to find all branches
fetch-depth: 0
- name: Create backport PRs
# should be kept in sync with `version`
uses: zeebe-io/backport-action@v0.0.7
with:
# Config README: https://github.com/zeebe-io/backport-action#backport-action
github_token: ${{ secrets.GITHUB_TOKEN }}
github_workspace: ${{ github.workspace }}
pull_description: |-
Bot-based backport to `${target_branch}`, triggered by a label in #${pull_number}.
# should be kept in sync with `uses`
version: v0.0.5


@ -1,20 +1,23 @@
name: "Test" name: "CI"
on: on:
pull_request: pull_request:
push: push:
jobs: jobs:
tests: tests:
needs: [check_cachix] needs: [check_cachix]
strategy: strategy:
matrix: matrix:
os: [ubuntu-latest, macos-latest] os: [ubuntu-latest, macos-latest]
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
timeout-minutes: 60
steps: steps:
- uses: actions/checkout@v2.3.4 - uses: actions/checkout@v2.4.0
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: cachix/install-nix-action@v14 - uses: cachix/install-nix-action@v16
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- uses: cachix/cachix-action@v10 - uses: cachix/cachix-action@v10
if: needs.check_cachix.outputs.secret == 'true' if: needs.check_cachix.outputs.secret == 'true'
@ -22,7 +25,8 @@ jobs:
name: '${{ env.CACHIX_NAME }}' name: '${{ env.CACHIX_NAME }}'
signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}' signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}' authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
- run: nix-build -A checks.$(nix-instantiate --eval -E '(builtins.currentSystem)') - run: nix --experimental-features 'nix-command flakes' flake check -L
check_cachix: check_cachix:
name: Cachix secret present for installer tests name: Cachix secret present for installer tests
runs-on: ubuntu-latest runs-on: ubuntu-latest
@ -34,6 +38,7 @@ jobs:
env: env:
_CACHIX_SECRETS: ${{ secrets.CACHIX_SIGNING_KEY }}${{ secrets.CACHIX_AUTH_TOKEN }} _CACHIX_SECRETS: ${{ secrets.CACHIX_SIGNING_KEY }}${{ secrets.CACHIX_AUTH_TOKEN }}
run: echo "::set-output name=secret::${{ env._CACHIX_SECRETS != '' }}" run: echo "::set-output name=secret::${{ env._CACHIX_SECRETS != '' }}"
installer: installer:
needs: [tests, check_cachix] needs: [tests, check_cachix]
if: github.event_name == 'push' && needs.check_cachix.outputs.secret == 'true' if: github.event_name == 'push' && needs.check_cachix.outputs.secret == 'true'
@ -41,11 +46,11 @@ jobs:
outputs: outputs:
installerURL: ${{ steps.prepare-installer.outputs.installerURL }} installerURL: ${{ steps.prepare-installer.outputs.installerURL }}
steps: steps:
- uses: actions/checkout@v2.3.4 - uses: actions/checkout@v2.4.0
with: with:
fetch-depth: 0 fetch-depth: 0
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- uses: cachix/install-nix-action@v14 - uses: cachix/install-nix-action@v16
- uses: cachix/cachix-action@v10 - uses: cachix/cachix-action@v10
with: with:
name: '${{ env.CACHIX_NAME }}' name: '${{ env.CACHIX_NAME }}'
@ -53,6 +58,7 @@ jobs:
authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}' authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
- id: prepare-installer - id: prepare-installer
run: scripts/prepare-installer-for-github-actions run: scripts/prepare-installer-for-github-actions
installer_test: installer_test:
needs: [installer, check_cachix] needs: [installer, check_cachix]
if: github.event_name == 'push' && needs.check_cachix.outputs.secret == 'true' if: github.event_name == 'push' && needs.check_cachix.outputs.secret == 'true'
@ -61,10 +67,42 @@ jobs:
os: [ubuntu-latest, macos-latest] os: [ubuntu-latest, macos-latest]
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
steps: steps:
- uses: actions/checkout@v2.3.4 - uses: actions/checkout@v2.4.0
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- uses: cachix/install-nix-action@v14 - uses: cachix/install-nix-action@v16
with: with:
install_url: '${{needs.installer.outputs.installerURL}}' install_url: '${{needs.installer.outputs.installerURL}}'
install_options: "--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve" install_options: "--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve"
- run: nix-instantiate -E 'builtins.currentTime' --eval - run: nix-instantiate -E 'builtins.currentTime' --eval
docker_push_image:
needs: [check_cachix, tests]
if: >-
github.event_name == 'push' &&
github.ref_name == 'master' &&
needs.check_cachix.outputs.secret == 'true'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.4.0
with:
fetch-depth: 0
- uses: cachix/install-nix-action@v16
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- run: echo NIX_VERSION="$(nix-instantiate --eval -E '(import ./default.nix).defaultPackage.${builtins.currentSystem}.version' | tr -d \")" >> $GITHUB_ENV
- uses: cachix/cachix-action@v10
if: needs.check_cachix.outputs.secret == 'true'
with:
name: '${{ env.CACHIX_NAME }}'
signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
- run: nix --experimental-features 'nix-command flakes' build .#dockerImage -L
- run: docker load -i ./result/image.tar.gz
- run: docker tag nix:$NIX_VERSION nixos/nix:$NIX_VERSION
- run: docker tag nix:$NIX_VERSION nixos/nix:master
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- run: docker push nixos/nix:$NIX_VERSION
- run: docker push nixos/nix:master

.github/workflows/hydra_status.yml (new file)

@ -0,0 +1,16 @@
name: Hydra status
on:
schedule:
- cron: "12,42 * * * *"
workflow_dispatch:
jobs:
check_hydra_status:
name: Check Hydra status
if: github.repository_owner == 'NixOS'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.4.0
with:
fetch-depth: 0
- run: bash scripts/check-hydra-status.sh

.gitignore

@ -26,8 +26,6 @@ perl/Makefile.config
# /scripts/ # /scripts/
/scripts/nix-profile.sh /scripts/nix-profile.sh
/scripts/nix-reduce-build
/scripts/nix-http-export.cgi
/scripts/nix-profile-daemon.sh /scripts/nix-profile-daemon.sh
# /src/libexpr/ # /src/libexpr/
@ -122,3 +120,7 @@ GTAGS
compile_commands.json compile_commands.json
nix-rust/target nix-rust/target
result
.vscode/


@@ -1 +1 @@
-2.5
+2.8.0


@ -16,6 +16,7 @@ LDFLAGS = @LDFLAGS@
LIBARCHIVE_LIBS = @LIBARCHIVE_LIBS@ LIBARCHIVE_LIBS = @LIBARCHIVE_LIBS@
LIBBROTLI_LIBS = @LIBBROTLI_LIBS@ LIBBROTLI_LIBS = @LIBBROTLI_LIBS@
LIBCURL_LIBS = @LIBCURL_LIBS@ LIBCURL_LIBS = @LIBCURL_LIBS@
LOWDOWN_LIBS = @LOWDOWN_LIBS@
OPENSSL_LIBS = @OPENSSL_LIBS@ OPENSSL_LIBS = @OPENSSL_LIBS@
LIBSECCOMP_LIBS = @LIBSECCOMP_LIBS@ LIBSECCOMP_LIBS = @LIBSECCOMP_LIBS@
PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_NAME = @PACKAGE_NAME@


@ -1,8 +1,8 @@
diff --git a/pthread_stop_world.c b/pthread_stop_world.c diff --git a/pthread_stop_world.c b/pthread_stop_world.c
index 1cee6a0b..46c3acd9 100644 index 4b2c429..1fb4c52 100644
--- a/pthread_stop_world.c --- a/pthread_stop_world.c
+++ b/pthread_stop_world.c +++ b/pthread_stop_world.c
@@ -674,6 +674,8 @@ GC_INNER void GC_push_all_stacks(void) @@ -673,6 +673,8 @@ GC_INNER void GC_push_all_stacks(void)
struct GC_traced_stack_sect_s *traced_stack_sect; struct GC_traced_stack_sect_s *traced_stack_sect;
pthread_t self = pthread_self(); pthread_t self = pthread_self();
word total_size = 0; word total_size = 0;
@ -11,7 +11,7 @@ index 1cee6a0b..46c3acd9 100644
if (!EXPECT(GC_thr_initialized, TRUE)) if (!EXPECT(GC_thr_initialized, TRUE))
GC_thr_init(); GC_thr_init();
@@ -723,6 +725,28 @@ GC_INNER void GC_push_all_stacks(void) @@ -722,6 +724,31 @@ GC_INNER void GC_push_all_stacks(void)
hi = p->altstack + p->altstack_size; hi = p->altstack + p->altstack_size;
/* FIXME: Need to scan the normal stack too, but how ? */ /* FIXME: Need to scan the normal stack too, but how ? */
/* FIXME: Assume stack grows down */ /* FIXME: Assume stack grows down */
@ -22,6 +22,9 @@ index 1cee6a0b..46c3acd9 100644
+ if (pthread_attr_getstacksize(&pattr, &stack_limit)) { + if (pthread_attr_getstacksize(&pattr, &stack_limit)) {
+ ABORT("GC_push_all_stacks: pthread_attr_getstacksize failed!"); + ABORT("GC_push_all_stacks: pthread_attr_getstacksize failed!");
+ } + }
+ if (pthread_attr_destroy(&pattr)) {
+ ABORT("GC_push_all_stacks: pthread_attr_destroy failed!");
+ }
+ // When a thread goes into a coroutine, we lose its original sp until + // When a thread goes into a coroutine, we lose its original sp until
+ // control flow returns to the thread. + // control flow returns to the thread.
+ // While in the coroutine, the sp points outside the thread stack, + // While in the coroutine, the sp points outside the thread stack,


@ -188,17 +188,24 @@ PKG_CHECK_MODULES([EDITLINE], [libeditline], [CXXFLAGS="$EDITLINE_CFLAGS $CXXFLA
[AC_MSG_ERROR([Nix requires libeditline; it was not found via pkg-config, but via its header, but required functions do not work. Maybe it is too old? >= 1.14 is required.])]) [AC_MSG_ERROR([Nix requires libeditline; it was not found via pkg-config, but via its header, but required functions do not work. Maybe it is too old? >= 1.14 is required.])])
]) ])
# Look for libsodium, an optional dependency. # Look for libsodium.
PKG_CHECK_MODULES([SODIUM], [libsodium], [CXXFLAGS="$SODIUM_CFLAGS $CXXFLAGS"]) PKG_CHECK_MODULES([SODIUM], [libsodium], [CXXFLAGS="$SODIUM_CFLAGS $CXXFLAGS"])
# Look for libbrotli{enc,dec}. # Look for libbrotli{enc,dec}.
PKG_CHECK_MODULES([LIBBROTLI], [libbrotlienc libbrotlidec], [CXXFLAGS="$LIBBROTLI_CFLAGS $CXXFLAGS"]) PKG_CHECK_MODULES([LIBBROTLI], [libbrotlienc libbrotlidec], [CXXFLAGS="$LIBBROTLI_CFLAGS $CXXFLAGS"])
# Look for libcpuid. # Look for libcpuid.
have_libcpuid=
if test "$machine_name" = "x86_64"; then if test "$machine_name" = "x86_64"; then
PKG_CHECK_MODULES([LIBCPUID], [libcpuid], [CXXFLAGS="$LIBCPUID_CFLAGS $CXXFLAGS"]) AC_ARG_ENABLE([cpuid],
AS_HELP_STRING([--disable-cpuid], [Do not determine microarchitecture levels with libcpuid (relevant to x86_64 only)]))
if test "x$enable_cpuid" != "xno"; then
PKG_CHECK_MODULES([LIBCPUID], [libcpuid],
[CXXFLAGS="$LIBCPUID_CFLAGS $CXXFLAGS"
have_libcpuid=1 have_libcpuid=1
AC_DEFINE([HAVE_LIBCPUID], [1], [Use libcpuid]) AC_DEFINE([HAVE_LIBCPUID], [1], [Use libcpuid])]
)
fi
fi fi
AC_SUBST(HAVE_LIBCPUID, [$have_libcpuid]) AC_SUBST(HAVE_LIBCPUID, [$have_libcpuid])
@ -255,13 +262,17 @@ fi
PKG_CHECK_MODULES([GTEST], [gtest_main]) PKG_CHECK_MODULES([GTEST], [gtest_main])
# Look for nlohmann/json.
PKG_CHECK_MODULES([NLOHMANN_JSON], [nlohmann_json >= 3.9])
# documentation generation switch # documentation generation switch
AC_ARG_ENABLE(doc-gen, AS_HELP_STRING([--disable-doc-gen],[disable documentation generation]), AC_ARG_ENABLE(doc-gen, AS_HELP_STRING([--disable-doc-gen],[disable documentation generation]),
doc_generate=$enableval, doc_generate=yes) doc_generate=$enableval, doc_generate=yes)
AC_SUBST(doc_generate) AC_SUBST(doc_generate)
# Look for lowdown library. # Look for lowdown library.
PKG_CHECK_MODULES([LOWDOWN], [lowdown >= 0.8.0], [CXXFLAGS="$LOWDOWN_CFLAGS $CXXFLAGS"]) PKG_CHECK_MODULES([LOWDOWN], [lowdown >= 0.9.0], [CXXFLAGS="$LOWDOWN_CFLAGS $CXXFLAGS"])
# Setuid installations. # Setuid installations.
AC_CHECK_FUNCS([setresuid setreuid lchown]) AC_CHECK_FUNCS([setresuid setreuid lchown])


@@ -1,3 +1,3 @@
-(import (fetchTarball https://github.com/edolstra/flake-compat/archive/master.tar.gz) {
+(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") {
   src = ./.;
 }).defaultNix


@ -6,9 +6,9 @@ builtins:
concatStrings (map concatStrings (map
(name: (name:
let builtin = builtins.${name}; in let builtin = builtins.${name}; in
"<dt><code>${name} " "<dt id=\"builtins-${name}\"><a href=\"#builtins-${name}\"><code>${name} "
+ concatStringsSep " " (map (s: "<var>${s}</var>") builtin.args) + concatStringsSep " " (map (s: "<var>${s}</var>") builtin.args)
+ "</code></dt>" + "</code></a></dt>"
+ "<dd>\n\n" + "<dd>\n\n"
+ builtin.doc + builtin.doc
+ "\n\n</dd>" + "\n\n</dd>"


@ -8,7 +8,8 @@ concatStrings (map
let option = options.${name}; in let option = options.${name}; in
" - `${name}` \n\n" " - `${name}` \n\n"
+ concatStrings (map (s: " ${s}\n") (splitLines option.description)) + "\n\n" + concatStrings (map (s: " ${s}\n") (splitLines option.description)) + "\n\n"
+ " **Default:** " + ( + (if option.documentDefault
then " **Default:** " + (
if option.value == "" || option.value == [] if option.value == "" || option.value == []
then "*empty*" then "*empty*"
else if isBool option.value else if isBool option.value
@ -19,6 +20,7 @@ concatStrings (map
# JSON, but that converts to "{ }" here. # JSON, but that converts to "{ }" here.
(if isAttrs option.value then "`\"\"`" (if isAttrs option.value then "`\"\"`"
else "`" + toString option.value + "`")) + "\n\n" else "`" + toString option.value + "`")) + "\n\n"
else " **Default:** *machine-specific*\n")
+ (if option.aliases != [] + (if option.aliases != []
then " **Deprecated alias:** " + (concatStringsSep ", " (map (s: "`${s}`") option.aliases)) + "\n\n" then " **Deprecated alias:** " + (concatStringsSep ", " (map (s: "`${s}`") option.aliases)) + "\n\n"
else "") else "")


@ -12,11 +12,13 @@ man-pages := $(foreach n, \
clean-files += $(d)/*.1 $(d)/*.5 $(d)/*.8 clean-files += $(d)/*.1 $(d)/*.5 $(d)/*.8
# Provide a dummy environment for nix, so that it will not access files outside the macOS sandbox. # Provide a dummy environment for nix, so that it will not access files outside the macOS sandbox.
# Set cores to 0 because otherwise nix show-config resolves the cores based on the current machine
dummy-env = env -i \ dummy-env = env -i \
HOME=/dummy \ HOME=/dummy \
NIX_CONF_DIR=/dummy \ NIX_CONF_DIR=/dummy \
NIX_SSL_CERT_FILE=/dummy/no-ca-bundle.crt \ NIX_SSL_CERT_FILE=/dummy/no-ca-bundle.crt \
NIX_STATE_DIR=/dummy NIX_STATE_DIR=/dummy \
NIX_CONFIG='cores = 0'
nix-eval = $(dummy-env) $(bindir)/nix eval --experimental-features nix-command -I nix/corepkgs=corepkgs --store dummy:// --impure --raw nix-eval = $(dummy-env) $(bindir)/nix eval --experimental-features nix-command -I nix/corepkgs=corepkgs --store dummy:// --impure --raw
@ -70,6 +72,7 @@ $(d)/builtins.json: $(bindir)/nix
@mv $@.tmp $@ @mv $@.tmp $@
# Generate the HTML manual. # Generate the HTML manual.
html: $(docdir)/manual/index.html
install: $(docdir)/manual/index.html install: $(docdir)/manual/index.html
# Generate 'nix' manpages. # Generate 'nix' manpages.


@ -9,6 +9,7 @@
- [Prerequisites](installation/prerequisites-source.md) - [Prerequisites](installation/prerequisites-source.md)
- [Obtaining a Source Distribution](installation/obtaining-source.md) - [Obtaining a Source Distribution](installation/obtaining-source.md)
- [Building Nix from Source](installation/building-source.md) - [Building Nix from Source](installation/building-source.md)
- [Using Nix within Docker](installation/installing-docker.md)
- [Security](installation/nix-security.md) - [Security](installation/nix-security.md)
- [Single-User Mode](installation/single-user.md) - [Single-User Mode](installation/single-user.md)
- [Multi-User Mode](installation/multi-user.md) - [Multi-User Mode](installation/multi-user.md)
@ -70,7 +71,11 @@
- [Hacking](contributing/hacking.md) - [Hacking](contributing/hacking.md)
- [CLI guideline](contributing/cli-guideline.md) - [CLI guideline](contributing/cli-guideline.md)
- [Release Notes](release-notes/release-notes.md) - [Release Notes](release-notes/release-notes.md)
- [Release 2.4 (2021-XX-XX)](release-notes/rl-2.4.md) - [Release X.Y (202?-??-??)](release-notes/rl-next.md)
- [Release 2.7 (2022-03-07)](release-notes/rl-2.7.md)
- [Release 2.6 (2022-01-24)](release-notes/rl-2.6.md)
- [Release 2.5 (2021-12-13)](release-notes/rl-2.5.md)
- [Release 2.4 (2021-11-01)](release-notes/rl-2.4.md)
- [Release 2.3 (2019-09-04)](release-notes/rl-2.3.md) - [Release 2.3 (2019-09-04)](release-notes/rl-2.3.md)
- [Release 2.2 (2019-01-11)](release-notes/rl-2.2.md) - [Release 2.2 (2019-01-11)](release-notes/rl-2.2.md)
- [Release 2.1 (2018-09-02)](release-notes/rl-2.1.md) - [Release 2.1 (2018-09-02)](release-notes/rl-2.1.md)


@ -53,8 +53,8 @@ example, the following command allows you to build a derivation for
$ uname $ uname
Linux Linux
$ nix build \ $ nix build --impure \
'(with import <nixpkgs> { system = "x86_64-darwin"; }; runCommand "foo" {} "uname > $out")' \ --expr '(with import <nixpkgs> { system = "x86_64-darwin"; }; runCommand "foo" {} "uname > $out")' \
--builders 'ssh://mac x86_64-darwin' --builders 'ssh://mac x86_64-darwin'
[1/0/1 built, 0.0 MiB DL] building foo on ssh://mac [1/0/1 built, 0.0 MiB DL] building foo on ssh://mac


@ -16,8 +16,9 @@ By default Nix reads settings from the following places:
will be loaded in reverse order. will be loaded in reverse order.
Otherwise it will look for `nix/nix.conf` files in `XDG_CONFIG_DIRS` Otherwise it will look for `nix/nix.conf` files in `XDG_CONFIG_DIRS`
and `XDG_CONFIG_HOME`. If these are unset, it will look in and `XDG_CONFIG_HOME`. If unset, `XDG_CONFIG_DIRS` defaults to
`$HOME/.config/nix.conf`. `/etc/xdg`, and `XDG_CONFIG_HOME` defaults to `$HOME/.config`
as per [XDG Base Directory Specification](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html).
- If `NIX_CONFIG` is set, its contents is treated as the contents of - If `NIX_CONFIG` is set, its contents is treated as the contents of
a configuration file. a configuration file.
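As an illustration of the `NIX_CONFIG` behaviour described above (the setting used here, `max-jobs`, is just an example):

```console
$ NIX_CONFIG='max-jobs = 2' nix --experimental-features nix-command show-config | grep max-jobs
max-jobs = 2
```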


@ -238,7 +238,16 @@ a number of possible ways:
## Examples ## Examples
To install a specific version of `gcc` from the active Nix expression: To install a package using a specific attribute path from the active Nix expression:
```console
$ nix-env -iA gcc40mips
installing `gcc-4.0.2'
$ nix-env -iA xorg.xorgserver
installing `xorg-server-1.2.0'
```
To install a specific version of `gcc` using the derivation name:
```console ```console
$ nix-env --install gcc-3.3.2 $ nix-env --install gcc-3.3.2
@ -246,6 +255,9 @@ installing `gcc-3.3.2'
uninstalling `gcc-3.1' uninstalling `gcc-3.1'
``` ```
Using attribute path for selecting a package is preferred,
as it is much faster and there will not be multiple matches.
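For example (the attribute path and version shown are illustrative and depend on your channel):

```console
$ nix-env -qaP firefox        # look up the unambiguous attribute path first
nixpkgs.firefox  firefox-94.0.1
$ nix-env -iA nixpkgs.firefox # then install by attribute path
```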
Note the previously installed version is removed, since Note the previously installed version is removed, since
`--preserve-installed` was not specified. `--preserve-installed` was not specified.
@ -256,13 +268,6 @@ $ nix-env --install gcc
installing `gcc-3.3.2' installing `gcc-3.3.2'
``` ```
To install using a specific attribute:
```console
$ nix-env -i -A gcc40mips
$ nix-env -i -A xorg.xorgserver
```
To install all derivations in the Nix expression `foo.nix`: To install all derivations in the Nix expression `foo.nix`:
```console ```console
@ -374,22 +379,29 @@ For the other flags, see `--install`.
## Examples ## Examples
```console ```console
$ nix-env --upgrade gcc $ nix-env --upgrade -A nixpkgs.gcc
upgrading `gcc-3.3.1' to `gcc-3.4' upgrading `gcc-3.3.1' to `gcc-3.4'
``` ```
When there are no updates available, nothing will happen:
```console ```console
$ nix-env -u gcc-3.3.2 --always (switch to a specific version) $ nix-env --upgrade -A nixpkgs.pan
```
Using `-A` is preferred when possible, as it is faster and unambiguous but
it is also possible to upgrade to a specific version by matching the derivation name:
```console
$ nix-env -u gcc-3.3.2 --always
upgrading `gcc-3.4' to `gcc-3.3.2' upgrading `gcc-3.4' to `gcc-3.3.2'
``` ```
```console To try to upgrade everything
$ nix-env --upgrade pan (matching packages based on the part of the derivation name without version):
(no upgrades available, so nothing happens)
```
```console ```console
$ nix-env -u (try to upgrade everything) $ nix-env -u
upgrading `hello-2.1.2' to `hello-2.1.3' upgrading `hello-2.1.2' to `hello-2.1.3'
upgrading `mozilla-1.2' to `mozilla-1.4' upgrading `mozilla-1.2' to `mozilla-1.4'
``` ```
@ -401,7 +413,7 @@ of a derivation `x` by looking at their respective `name` attributes.
The names (e.g., `gcc-3.3.1` are split into two parts: the package name The names (e.g., `gcc-3.3.1` are split into two parts: the package name
(`gcc`), and the version (`3.3.1`). The version part starts after the (`gcc`), and the version (`3.3.1`). The version part starts after the
first dash not followed by a letter. `x` is considered an upgrade of `y` first dash not followed by a letter. `x` is considered an upgrade of `y`
if their package names match, and the version of `y` is higher that that if their package names match, and the version of `y` is higher than that
of `x`. of `x`.
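This name splitting and the version comparison can be inspected directly with the `builtins.parseDrvName` and `builtins.compareVersions` built-ins:

```console
$ nix-instantiate --eval --strict -E 'builtins.parseDrvName "gcc-3.3.1"'
{ name = "gcc"; version = "3.3.1"; }
$ nix-instantiate --eval -E 'builtins.compareVersions "3.3.1" "3.4"'
-1
```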
The versions are compared by splitting them into contiguous components The versions are compared by splitting them into contiguous components


@ -11,8 +11,8 @@
[`--command` *cmd*] [`--command` *cmd*]
[`--run` *cmd*] [`--run` *cmd*]
[`--exclude` *regexp*] [`--exclude` *regexp*]
[--pure] [`--pure`]
[--keep *name*] [`--keep` *name*]
{{`--packages` | `-p`} {*packages* | *expressions*} … | [*path*]} {{`--packages` | `-p`} {*packages* | *expressions*} … | [*path*]}
# Description # Description
@ -101,7 +101,8 @@ The following common options are supported:
- `NIX_BUILD_SHELL`\ - `NIX_BUILD_SHELL`\
Shell used to start the interactive environment. Defaults to the Shell used to start the interactive environment. Defaults to the
`bash` found in `PATH`. `bash` found in `<nixpkgs>`, falling back to the `bash` found in
`PATH` if not found.
# Examples # Examples
@ -110,13 +111,19 @@ shell in which to build it:
```console ```console
$ nix-shell '<nixpkgs>' -A pan $ nix-shell '<nixpkgs>' -A pan
[nix-shell]$ unpackPhase [nix-shell]$ eval ${unpackPhase:-unpackPhase}
[nix-shell]$ cd pan-* [nix-shell]$ cd pan-*
[nix-shell]$ configurePhase [nix-shell]$ eval ${configurePhase:-configurePhase}
[nix-shell]$ buildPhase [nix-shell]$ eval ${buildPhase:-buildPhase}
[nix-shell]$ ./pan/gui/pan [nix-shell]$ ./pan/gui/pan
``` ```
The reason we use form `eval ${configurePhase:-configurePhase}` here is because
those packages that override these phases do so by exporting the overridden
values in the environment variable of the same name.
Here bash is being told to either evaluate the contents of 'configurePhase',
if it exists as a variable, otherwise evaluate the configurePhase function.
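A minimal bash illustration of this fallback behaviour, using a throwaway name `foo` instead of a real phase:

```console
$ foo() { echo "ran the function"; }
$ eval ${foo:-foo}          # variable unset, so the function runs
ran the function
$ foo='echo "ran the override"'
$ eval ${foo:-foo}          # variable set, so its contents run instead
ran the override
```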
To clear the environment first, and do some additional automatic To clear the environment first, and do some additional automatic
initialisation of the interactive shell: initialisation of the interactive shell:


@ -125,7 +125,7 @@ Special exit codes:
- `104`\ - `104`\
Not deterministic, the build succeeded in check mode but the Not deterministic, the build succeeded in check mode but the
resulting output is not binary reproducable. resulting output is not binary reproducible.
With the `--keep-going` flag it's possible for multiple failures to With the `--keep-going` flag it's possible for multiple failures to
occur, in this case the 1xx status codes are or combined using binary occur, in this case the 1xx status codes are or combined using binary
@ -321,8 +321,8 @@ symlink.
This query has one option: This query has one option:
- `--include-outputs` - `--include-outputs`
Also include the output path of store derivations, and their Also include the existing output paths of store derivations,
closures. and their closures.
This query can be used to implement various kinds of deployment. A This query can be used to implement various kinds of deployment. A
*source deployment* is obtained by distributing the closure of a *source deployment* is obtained by distributing the closure of a


@ -162,11 +162,11 @@ Most Nix commands accept the following command-line options:
}: ... }: ...
``` ```
So if you call this Nix expression (e.g., when you do `nix-env -i So if you call this Nix expression (e.g., when you do `nix-env -iA
pkgname`), the function will be called automatically using the pkgname`), the function will be called automatically using the
value [`builtins.currentSystem`](../expressions/builtins.md) for value [`builtins.currentSystem`](../expressions/builtins.md) for
the `system` argument. You can override this using `--arg`, e.g., the `system` argument. You can override this using `--arg`, e.g.,
`nix-env -i pkgname --arg system \"i686-freebsd\"`. (Note that `nix-env -iA pkgname --arg system \"i686-freebsd\"`. (Note that
since the argument is a Nix string literal, you have to escape the since the argument is a Nix string literal, you have to escape the
quotes.) quotes.)
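A small illustration of this auto-calling and of overriding an argument with `--arg` (the first output shown assumes an x86_64 Linux machine):

```console
$ nix-instantiate --eval -E '{ system ? builtins.currentSystem }: system'
"x86_64-linux"
$ nix-instantiate --eval -E '{ system ? builtins.currentSystem }: system' --arg system '"i686-freebsd"'
"i686-freebsd"
```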


@ -3,7 +3,7 @@
## Goals ## Goals
Purpose of this document is to provide a clear direction to **help design Purpose of this document is to provide a clear direction to **help design
delightful command line** experience. This document contain guidelines to delightful command line** experience. This document contains guidelines to
follow to ensure a consistent and approachable user experience. follow to ensure a consistent and approachable user experience.
## Overview ## Overview
@ -176,7 +176,7 @@ $ nix init --template=template#pyton
------------------------------------------------------------------------ ------------------------------------------------------------------------
Initializing Nix project at `/path/to/here`. Initializing Nix project at `/path/to/here`.
Select a template for you new project: Select a template for you new project:
|> template#pyton |> template#python
template#python-pip template#python-pip
template#python-poetry template#python-poetry
``` ```
@ -230,8 +230,8 @@ Now **Learn** part of the output is where you educate users. You should only
show it when you know that a build will take some time and not annoy users of show it when you know that a build will take some time and not annoy users of
the builds that take only few seconds. the builds that take only few seconds.
Every feature like this should go though a intensive review and testing to Every feature like this should go through an intensive review and testing to
collect as much a feedback as possible and to fine tune every little detail. If collect as much feedback as possible and to fine tune every little detail. If
done right this can be an awesome features beginners and advance users will done right this can be an awesome features beginners and advance users will
love, but if not done perfectly it will annoy users and leave bad impression. love, but if not done perfectly it will annoy users and leave bad impression.
@ -240,7 +240,7 @@ love, but if not done perfectly it will annoy users and leave bad impression.
Input to a command is provided via `ARGUMENTS` and `OPTIONS`. Input to a command is provided via `ARGUMENTS` and `OPTIONS`.
`ARGUMENTS` represent a required input for a function. When choosing to use `ARGUMENTS` represent a required input for a function. When choosing to use
`ARGUMENT` over function please be aware of the downsides that come with it: `ARGUMENTS` over `OPTIONS` please be aware of the downsides that come with it:
- User will need to remember the order of `ARGUMENTS`. This is not a problem if - User will need to remember the order of `ARGUMENTS`. This is not a problem if
there is only one `ARGUMENT`. there is only one `ARGUMENT`.
@ -253,7 +253,7 @@ developer consider the downsides and choose wisely.
## Naming the `OPTIONS` ## Naming the `OPTIONS`
Then only naming convention - apart from the ones mentioned in Naming the The only naming convention - apart from the ones mentioned in Naming the
`COMMANDS` section is how flags are named. `COMMANDS` section is how flags are named.
Flags are a type of `OPTION` that represent an option that can be turned ON of Flags are a type of `OPTION` that represent an option that can be turned ON of
@ -271,12 +271,12 @@ to improve the discoverability of possible input. A new user will most likely
not know which `ARGUMENTS` and `OPTIONS` are required or which values are not know which `ARGUMENTS` and `OPTIONS` are required or which values are
possible for those options. possible for those options.
In cases, the user might not provide the input or they provide wrong input, In case the user does not provide the input or they provide wrong input,
rather then show the error, prompt a user with an option to find and select rather than show the error, prompt a user with an option to find and select
correct input (see examples). correct input (see examples).
Prompting is of course not required when TTY is not attached to STDIN. This Prompting is of course not required when TTY is not attached to STDIN. This
would mean that scripts wont need to handle prompt, but rather handle errors. would mean that scripts won't need to handle prompt, but rather handle errors.
A place to use prompt and provide user with interactive select A place to use prompt and provide user with interactive select
@ -300,7 +300,7 @@ going to happen.
```shell ```shell
$ nix build --option substitutors https://cache.example.org $ nix build --option substitutors https://cache.example.org
------------------------------------------------------------------------ ------------------------------------------------------------------------
Warning! A security related question need to be answered. Warning! A security related question needs to be answered.
------------------------------------------------------------------------ ------------------------------------------------------------------------
The following substitutors will be used to in `my-project`: The following substitutors will be used to in `my-project`:
- https://cache.example.org - https://cache.example.org
@ -311,14 +311,14 @@ $ nix build --option substitutors https://cache.example.org
# Output # Output
Terminal output can be quite limiting in many ways. Which should forces us to Terminal output can be quite limiting in many ways. Which should force us to
think about the experience even more. As with every design the output is a think about the experience even more. As with every design the output is a
compromise between being terse and being verbose, between showing help to compromise between being terse and being verbose, between showing help to
beginners and annoying advance users. For this it is important that we know beginners and annoying advance users. For this it is important that we know
what are the priorities. what are the priorities.
Nix command line should be first and foremost written with beginners in mind. Nix command line should be first and foremost written with beginners in mind.
But users wont stay beginners for long and what was once useful might quickly But users won't stay beginners for long and what was once useful might quickly
become annoying. There is no golden rule that we can give in this guideline become annoying. There is no golden rule that we can give in this guideline
that would make it easier how to draw a line and find best compromise. that would make it easier how to draw a line and find best compromise.
@ -342,7 +342,7 @@ also allowing them to redirect content to a file. For example:
```shell ```shell
$ nix build > build.txt $ nix build > build.txt
------------------------------------------------------------------------ ------------------------------------------------------------------------
Error! Atrribute `bin` missing at (1:94) from string. Error! Attribute `bin` missing at (1:94) from string.
------------------------------------------------------------------------ ------------------------------------------------------------------------
1| with import <nixpkgs> { }; (pkgs.runCommandCC or pkgs.runCommand) "shell" { buildInputs = [ (surge.bin) ]; } "" 1| with import <nixpkgs> { }; (pkgs.runCommandCC or pkgs.runCommand) "shell" { buildInputs = [ (surge.bin) ]; } ""
@ -508,7 +508,7 @@ can, with a few key strokes, be changed into and advance introspection tool.
### Progress ### Progress
For longer running commands we should provide and overview of the progress. For longer running commands we should provide and overview the progress.
This is shown best in `nix build` example: This is shown best in `nix build` example:
```shell ```shell
@ -553,7 +553,7 @@ going to happen.
```shell ```shell
$ nix build --option substitutors https://cache.example.org $ nix build --option substitutors https://cache.example.org
------------------------------------------------------------------------ ------------------------------------------------------------------------
Warning! A security related question need to be answered. Warning! A security related question needs to be answered.
------------------------------------------------------------------------ ------------------------------------------------------------------------
The following substitutors will be used to in `my-project`: The following substitutors will be used to in `my-project`:
- https://cache.example.org - https://cache.example.org


@ -35,6 +35,25 @@ variables are set up so that those dependencies can be found:
$ nix-shell $ nix-shell
``` ```
or if you have a flake-enabled nix:
```console
$ nix develop
```
To get a shell with a different compilation environment (e.g. stdenv,
gccStdenv, clangStdenv, clang11Stdenv):
```console
$ nix-shell -A devShells.x86_64-linux.clang11StdenvPackages
```
or if you have a flake-enabled nix:
```console
$ nix develop .#clang11StdenvPackages
```
To build Nix itself in this shell: To build Nix itself in this shell:
```console ```console


@@ -237,7 +237,7 @@ Derivations can declare some infrequently used optional attributes.
   - `preferLocalBuild`\
     If this attribute is set to `true` and [distributed building is
     enabled](../advanced-topics/distributed-builds.md), then, if
-    possible, the derivaton will be built locally instead of forwarded
+    possible, the derivation will be built locally instead of forwarded
     to a remote machine. This is appropriate for trivial builders
     where the cost of doing a download or remote build would exceed
     the cost of building locally.
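A minimal sketch of a derivation carrying this attribute (illustrative only, not taken from Nixpkgs):

```nix
derivation {
  name = "trivial-text-file";
  system = builtins.currentSystem;
  builder = "/bin/sh";
  args = [ "-c" "echo hello > $out" ];
  # Cheap to build, so prefer building it locally over forwarding to a remote machine.
  preferLocalBuild = true;
}
```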


@ -12,5 +12,5 @@ For instance, `derivation` is also available as `builtins.derivation`.
<dl> <dl>
<dt><code>derivation <var>attrs</var></code>; <dt><code>derivation <var>attrs</var></code>;
<code>builtins.derivation <var>attrs</var></code></dt> <code>builtins.derivation <var>attrs</var></code></dt>
<dd><p><var>derivation</var> in described in <dd><p><var>derivation</var> is described in
<a href="derivations.md">its own section</a>.</p></dd> <a href="derivations.md">its own section</a>.</p></dd>


@ -26,7 +26,7 @@ elements (referenced from the figure by number):
called with three arguments: `stdenv`, `fetchurl`, and `perl`. They called with three arguments: `stdenv`, `fetchurl`, and `perl`. They
are needed to build Hello, but we don't know how to build them here; are needed to build Hello, but we don't know how to build them here;
that's why they are function arguments. `stdenv` is a package that that's why they are function arguments. `stdenv` is a package that
is used by almost all Nix Packages packages; it provides a is used by almost all Nix Packages; it provides a
“standard” environment consisting of the things you would expect “standard” environment consisting of the things you would expect
in a basic Unix environment: a C/C++ compiler (GCC, to be precise), in a basic Unix environment: a C/C++ compiler (GCC, to be precise),
the Bash shell, fundamental Unix tools such as `cp`, `grep`, `tar`, the Bash shell, fundamental Unix tools such as `cp`, `grep`, `tar`,


@ -284,6 +284,10 @@ The points of interest are:
function is called with the `localServer` argument set to `true` but function is called with the `localServer` argument set to `true` but
the `db4` argument set to `null`, then the evaluation fails. the `db4` argument set to `null`, then the evaluation fails.
Note that `->` is the [logical
implication](https://en.wikipedia.org/wiki/Truth_table#Logical_implication)
Boolean operation.
2. This is a more subtle condition: if Subversion is built with Apache 2. This is a more subtle condition: if Subversion is built with Apache
(`httpServer`) support, then the Expat library (an XML library) used (`httpServer`) support, then the Expat library (an XML library) used
by Subversion should be same as the one used by Apache. This is by Subversion should be same as the one used by Apache. This is


@ -17,12 +17,12 @@ order of precedence (from strongest to weakest binding).
| String Concatenation | *string1* `+` *string2* | left | String concatenation. | 7 | | String Concatenation | *string1* `+` *string2* | left | String concatenation. | 7 |
| Not | `!` *e* | none | Boolean negation. | 8 | | Not | `!` *e* | none | Boolean negation. | 8 |
| Update | *e1* `//` *e2* | right | Return a set consisting of the attributes in *e1* and *e2* (with the latter taking precedence over the former in case of equally named attributes). | 9 | | Update | *e1* `//` *e2* | right | Return a set consisting of the attributes in *e1* and *e2* (with the latter taking precedence over the former in case of equally named attributes). | 9 |
| Less Than | *e1* `<` *e2*, | none | Arithmetic comparison. | 10 | | Less Than | *e1* `<` *e2*, | none | Arithmetic/lexicographic comparison. | 10 |
| Less Than or Equal To | *e1* `<=` *e2* | none | Arithmetic comparison. | 10 | | Less Than or Equal To | *e1* `<=` *e2* | none | Arithmetic/lexicographic comparison. | 10 |
| Greater Than | *e1* `>` *e2* | none | Arithmetic comparison. | 10 | | Greater Than | *e1* `>` *e2* | none | Arithmetic/lexicographic comparison. | 10 |
| Greater Than or Equal To | *e1* `>=` *e2* | none | Arithmetic comparison. | 10 | | Greater Than or Equal To | *e1* `>=` *e2* | none | Arithmetic/lexicographic comparison. | 10 |
| Equality | *e1* `==` *e2* | none | Equality. | 11 | | Equality | *e1* `==` *e2* | none | Equality. | 11 |
| Inequality | *e1* `!=` *e2* | none | Inequality. | 11 | | Inequality | *e1* `!=` *e2* | none | Inequality. | 11 |
| Logical AND | *e1* `&&` *e2* | left | Logical AND. | 12 | | Logical AND | *e1* `&&` *e2* | left | Logical AND. | 12 |
| Logical OR | *e1* `\|\|` *e2* | left | Logical OR. | 13 | | Logical OR | *e1* <code>&#124;&#124;</code> *e2* | left | Logical OR. | 13 |
| Logical Implication | *e1* `->` *e2* | none | Logical implication (equivalent to `!e1 \|\| e2`). | 14 | | Logical Implication | *e1* `->` *e2* | none | Logical implication (equivalent to <code>!e1 &#124;&#124; e2</code>). | 14 |
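A few of these operators evaluated with `nix-instantiate`, for illustration:

```console
$ nix-instantiate --eval --strict -E '{ a = 1; b = 2; } // { b = 3; }'
{ a = 1; b = 3; }
$ nix-instantiate --eval -E '"abc" < "abd"'
true
$ nix-instantiate --eval -E 'false -> true'
true
```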


@ -64,7 +64,7 @@ Nix has the following basic data types:
the start of each line. To be precise, it strips from each line a the start of each line. To be precise, it strips from each line a
number of spaces equal to the minimal indentation of the string as a number of spaces equal to the minimal indentation of the string as a
whole (disregarding the indentation of empty lines). For instance, whole (disregarding the indentation of empty lines). For instance,
the first and second line are indented two space, while the third the first and second line are indented two spaces, while the third
line is indented four spaces. Thus, two spaces are stripped from line is indented four spaces. Thus, two spaces are stripped from
each line, so the resulting string is each line, so the resulting string is


@ -1,6 +1,6 @@
# Building and Testing # Building and Testing
You can now try to build Hello. Of course, you could do `nix-env -i You can now try to build Hello. Of course, you could do `nix-env -f . -iA
hello`, but you may not want to install a possibly broken package just hello`, but you may not want to install a possibly broken package just
yet. The best way to test the package is by using the command yet. The best way to test the package is by using the command
`nix-build`, which builds a Nix expression and creates a symlink named `nix-build`, which builds a Nix expression and creates a symlink named


@ -98,3 +98,7 @@
store. It can contain regular files, directories and symbolic store. It can contain regular files, directories and symbolic
links. NARs are generated and unpacked using `nix-store --dump` links. NARs are generated and unpacked using `nix-store --dump`
and `nix-store --restore`. and `nix-store --restore`.
- `∅` \
The empty set symbol. In the context of profile history, this denotes a package is not present in a particular version of the profile.
- `ε` \
The epsilon symbol. In the context of a package, this means the version is empty. More precisely, the derivation does not have a version attribute.


@ -1,9 +1,9 @@
# Building Nix from Source # Building Nix from Source
After unpacking or checking out the Nix sources, issue the following After cloning Nix's Git repository, issue the following commands:
commands:
```console ```console
$ ./bootstrap.sh
$ ./configure options... $ ./configure options...
$ make $ make
$ make install $ make install
@ -11,13 +11,6 @@ $ make install
Nix requires GNU Make so you may need to invoke `gmake` instead. Nix requires GNU Make so you may need to invoke `gmake` instead.
When building from the Git repository, these should be preceded by the
command:
```console
$ ./bootstrap.sh
```
The installation path can be specified by passing the `--prefix=prefix` The installation path can be specified by passing the `--prefix=prefix`
to `configure`. The default installation directory is `/usr/local`. You to `configure`. The default installation directory is `/usr/local`. You
can change this to any location you like. You must have write permission can change this to any location you like. You must have write permission


@ -119,6 +119,30 @@ this to run the installer, but it may help if you run into trouble:
- update `/etc/synthetic.conf` to direct macOS to create a "synthetic" - update `/etc/synthetic.conf` to direct macOS to create a "synthetic"
empty root directory to mount your volume empty root directory to mount your volume
- specify mount options for the volume in `/etc/fstab` - specify mount options for the volume in `/etc/fstab`
- `rw`: read-write
- `noauto`: prevent the system from auto-mounting the volume (so the
LaunchDaemon mentioned below can control mounting it, and to avoid
masking problems with that mounting service).
- `nobrowse`: prevent the Nix Store volume from showing up on your
desktop; also keeps Spotlight from spending resources to index
this volume
<!-- TODO:
- `suid`: honor setuid? surely not? ...
- `owners`: honor file ownership on the volume
For now I'll avoid pretending to understand suid/owners more
than I do. There've been some vague reports of file-ownership
and permission issues, particularly in cloud/VM/headless setups.
My pet theory is that this has something to do with these setups
not having a token that gets delegated to initial/admin accounts
on macOS. See scripts/create-darwin-volume.sh for a little more.
In any case, by Dec 4 2021, it _seems_ like some combination of
suid, owners, and calling diskutil enableOwnership have stopped
new reports from coming in. But I hesitate to celebrate because we
haven't really named and catalogued the behavior, understood what
we're fixing, and validated that all 3 components are essential.
-->
- if you have FileVault enabled - if you have FileVault enabled
- generate an encryption password - generate an encryption password
- put it in your system Keychain - put it in your system Keychain


@ -0,0 +1,59 @@
# Using Nix within Docker
To run the latest stable release of Nix with Docker run the following command:
```console
$ docker run -ti nixos/nix
Unable to find image 'nixos/nix:latest' locally
latest: Pulling from nixos/nix
5843afab3874: Pull complete
b52bf13f109c: Pull complete
1e2415612aa3: Pull complete
Digest: sha256:27f6e7f60227e959ee7ece361f75d4844a40e1cc6878b6868fe30140420031ff
Status: Downloaded newer image for nixos/nix:latest
35ca4ada6e96:/# nix --version
nix (Nix) 2.3.12
35ca4ada6e96:/# exit
```
# What is included in Nix's Docker image?
The official Docker image is created using `pkgs.dockerTools.buildLayeredImage`
(and not with `Dockerfile` as it is usual with Docker images). You can still
base your custom Docker image on it as you would do with any other Docker
image.
The Docker image is also not based on any other image and includes minimal set
of runtime dependencies that are required to use Nix:
- pkgs.nix
- pkgs.bashInteractive
- pkgs.coreutils-full
- pkgs.gnutar
- pkgs.gzip
- pkgs.gnugrep
- pkgs.which
- pkgs.curl
- pkgs.less
- pkgs.wget
- pkgs.man
- pkgs.cacert.out
- pkgs.findutils
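A minimal sketch of a `pkgs.dockerTools.buildLayeredImage` call in this spirit (not the actual expression used for the official image; the name, tag, and package subset are illustrative):

```nix
{ pkgs ? import <nixpkgs> { } }:

pkgs.dockerTools.buildLayeredImage {
  name = "nix-example";
  tag = "latest";
  # A subset of the runtime dependencies listed above.
  contents = with pkgs; [ nix bashInteractive coreutils-full cacert.out ];
  config.Cmd = [ "/bin/bash" ];
}
```

Building such an expression with `nix-build` produces a compressed image tarball that can then be loaded with `docker load -i result`.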
# Docker image with the latest development version of Nix
To get the latest image that was built by [Hydra](https://hydra.nixos.org) run
the following command:
```console
$ curl -L https://hydra.nixos.org/job/nix/master/dockerImage.x86_64-linux/latest/download/1 | docker load
$ docker run -ti nix:2.5pre20211105
```
You can also build a Docker image from source yourself:
```console
$ nix build ./\#hydraJobs.dockerImage.x86_64-linux
$ docker load -i ./result/image.tar.gz
$ docker run -ti nix:2.5pre20211105
```


@@ -1,4 +1,4 @@
 # Installing Nix from Source
 
-If no binary package is available, you can download and compile a source
-distribution.
+If no binary package is available or if you want to hack on Nix, you
+can build Nix from its Git repository.


@ -1,14 +1,9 @@
# Obtaining a Source Distribution # Obtaining the Source
The source tarball of the most recent stable release can be downloaded The most recent sources of Nix can be obtained from its [Git
from the [Nix homepage](http://nixos.org/nix/download.html). You can repository](https://github.com/NixOS/nix). For example, the following
also grab the [most recent development command will check out the latest revision into a directory called
release](http://hydra.nixos.org/job/nix/master/release/latest-finished#tabs-constituents). `nix`:
Alternatively, the most recent sources of Nix can be obtained from its
[Git repository](https://github.com/NixOS/nix). For example, the
following command will check out the latest revision into a directory
called `nix`:
```console ```console
$ git clone https://github.com/NixOS/nix $ git clone https://github.com/NixOS/nix


@ -2,9 +2,8 @@
- GNU Autoconf (<https://www.gnu.org/software/autoconf/>) and the - GNU Autoconf (<https://www.gnu.org/software/autoconf/>) and the
autoconf-archive macro collection autoconf-archive macro collection
(<https://www.gnu.org/software/autoconf-archive/>). These are only (<https://www.gnu.org/software/autoconf-archive/>). These are
needed to run the bootstrap script, and are not necessary if your needed to run the bootstrap script.
source distribution came with a pre-built `./configure` script.
- GNU Make. - GNU Make.
@ -45,6 +44,11 @@
obtained from the its repository obtained from the its repository
<https://github.com/troglobit/editline>. <https://github.com/troglobit/editline>.
- The `libsodium` library for verifying cryptographic signatures
of contents fetched from binary caches.
It can be obtained from the official web site
<https://libsodium.org>.
- Recent versions of Bison and Flex to build the parser. (This is - Recent versions of Bison and Flex to build the parser. (This is
because Nix needs GLR support in Bison and reentrancy support in because Nix needs GLR support in Bison and reentrancy support in
Flex.) For Bison, you need version 2.6, which can be obtained from Flex.) For Bison, you need version 2.6, which can be obtained from
@ -52,11 +56,18 @@
you need version 2.5.35, which is available on you need version 2.5.35, which is available on
[SourceForge](http://lex.sourceforge.net/). Slightly older versions [SourceForge](http://lex.sourceforge.net/). Slightly older versions
may also work, but ancient versions like the ubiquitous 2.5.4a may also work, but ancient versions like the ubiquitous 2.5.4a
won't. Note that these are only required if you modify the parser or won't.
when you are building from the Git repository.
- The `libseccomp` is used to provide syscall filtering on Linux. This - The `libseccomp` is used to provide syscall filtering on Linux. This
is an optional dependency and can be disabled passing a is an optional dependency and can be disabled passing a
`--disable-seccomp-sandboxing` option to the `configure` script (Not `--disable-seccomp-sandboxing` option to the `configure` script (Not
recommended unless your system doesn't support `libseccomp`). To get recommended unless your system doesn't support `libseccomp`). To get
the library, visit <https://github.com/seccomp/libseccomp>. the library, visit <https://github.com/seccomp/libseccomp>.
- On 64-bit x86 machines only, `libcpuid` library
is used to determine which microarchitecture levels are supported
(e.g., as whether to have `x86_64-v2-linux` among additional system types).
The library is available from its homepage
<http://libcpuid.sourceforge.net>.
This is an optional dependency and can be disabled
by providing a `--disable-cpuid` to the `configure` script.
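For example, to configure a build without libcpuid support:

```console
$ ./configure --prefix=/usr/local --disable-cpuid
```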


@@ -4,4 +4,4 @@ Nix is currently supported on the following platforms:
   - Linux (i686, x86\_64, aarch64).
 
-  - macOS (x86\_64).
+  - macOS (x86\_64, aarch64).


@ -76,7 +76,7 @@ there after an upgrade. This means that you can _roll back_ to the
old version: old version:
```console ```console
$ nix-env --upgrade some-packages $ nix-env --upgrade -A nixpkgs.some-package
$ nix-env --rollback $ nix-env --rollback
``` ```
@ -122,12 +122,12 @@ Nix expressions generally describe how to build a package from
source, so an installation action like source, so an installation action like
```console ```console
$ nix-env --install firefox $ nix-env --install -A nixpkgs.firefox
``` ```
_could_ cause quite a bit of build activity, as not only Firefox but _could_ cause quite a bit of build activity, as not only Firefox but
also all its dependencies (all the way up to the C library and the also all its dependencies (all the way up to the C library and the
compiler) would have to built, at least if they are not already in the compiler) would have to be built, at least if they are not already in the
Nix store. This is a _source deployment model_. For most users, Nix store. This is a _source deployment model_. For most users,
building from source is not very pleasant as it takes far too long. building from source is not very pleasant as it takes far too long.
However, Nix can automatically skip building from source and instead However, Nix can automatically skip building from source and instead

View file

@ -24,7 +24,7 @@ collection; you could write your own Nix expressions based on Nixpkgs,
or completely new ones.) or completely new ones.)
You can manually download the latest version of Nixpkgs from You can manually download the latest version of Nixpkgs from
<http://nixos.org/nixpkgs/download.html>. However, its much more <https://github.com/NixOS/nixpkgs>. However, its much more
convenient to use the Nixpkgs [*channel*](channels.md), since it makes convenient to use the Nixpkgs [*channel*](channels.md), since it makes
it easy to stay up to date with new versions of Nixpkgs. Nixpkgs is it easy to stay up to date with new versions of Nixpkgs. Nixpkgs is
automatically added to your list of “subscribed” channels when you automatically added to your list of “subscribed” channels when you
@ -40,48 +40,52 @@ $ nix-channel --update
> >
> On NixOS, youre automatically subscribed to a NixOS channel > On NixOS, youre automatically subscribed to a NixOS channel
> corresponding to your NixOS major release (e.g. > corresponding to your NixOS major release (e.g.
> <http://nixos.org/channels/nixos-14.12>). A NixOS channel is identical > <http://nixos.org/channels/nixos-21.11>). A NixOS channel is identical
> to the Nixpkgs channel, except that it contains only Linux binaries > to the Nixpkgs channel, except that it contains only Linux binaries
> and is updated only if a set of regression tests succeed. > and is updated only if a set of regression tests succeed.
You can view the set of available packages in Nixpkgs: You can view the set of available packages in Nixpkgs:
```console ```console
$ nix-env -qa $ nix-env -qaP
aterm-2.2 nixpkgs.aterm aterm-2.2
bash-3.0 nixpkgs.bash bash-3.0
binutils-2.15 nixpkgs.binutils binutils-2.15
bison-1.875d nixpkgs.bison bison-1.875d
blackdown-1.4.2 nixpkgs.blackdown blackdown-1.4.2
bzip2-1.0.2 nixpkgs.bzip2 bzip2-1.0.2
``` ```
The flag `-q` specifies a query operation, and `-a` means that you want The flag `-q` specifies a query operation, `-a` means that you want
to show the “available” (i.e., installable) packages, as opposed to the to show the “available” (i.e., installable) packages, as opposed to the
installed packages. If you downloaded Nixpkgs yourself, or if you installed packages, and `-P` prints the attribute paths that can be used
checked it out from GitHub, then you need to pass the path to your to unambiguously select a package for installation (listed in the first column).
Nixpkgs tree using the `-f` flag: If you downloaded Nixpkgs yourself, or if you checked it out from GitHub,
then you need to pass the path to your Nixpkgs tree using the `-f` flag:
```console ```console
$ nix-env -qaf /path/to/nixpkgs $ nix-env -qaPf /path/to/nixpkgs
aterm aterm-2.2
bash bash-3.0
``` ```
where */path/to/nixpkgs* is where youve unpacked or checked out where */path/to/nixpkgs* is where youve unpacked or checked out
Nixpkgs. Nixpkgs.
You can select specific packages by name: You can filter the packages by name:
```console ```console
$ nix-env -qa firefox $ nix-env -qaP firefox
firefox-34.0.5 nixpkgs.firefox-esr firefox-91.3.0esr
firefox-with-plugins-34.0.5 nixpkgs.firefox firefox-94.0.1
``` ```
and using regular expressions: and using regular expressions:
```console ```console
$ nix-env -qa 'firefox.*' $ nix-env -qaP 'firefox.*'
``` ```
It is also possible to see the *status* of available packages, i.e., It is also possible to see the *status* of available packages, i.e.,
@ -89,11 +93,11 @@ whether they are installed into the user environment and/or present in
the system: the system:
```console ```console
$ nix-env -qas $ nix-env -qaPs
-PS bash-3.0 -PS nixpkgs.bash bash-3.0
--S binutils-2.15 --S nixpkgs.binutils binutils-2.15
IPS bison-1.875d IPS nixpkgs.bison bison-1.875d
``` ```
@ -106,13 +110,13 @@ which is Nixs mechanism for doing binary deployment. It just means that
Nix knows that it can fetch a pre-built package from somewhere Nix knows that it can fetch a pre-built package from somewhere
(typically a network server) instead of building it locally. (typically a network server) instead of building it locally.
You can install a package using `nix-env -i`. For instance, You can install a package using `nix-env -iA`. For instance,
```console ```console
$ nix-env -i subversion $ nix-env -iA nixpkgs.subversion
``` ```
will install the package called `subversion` (which is, of course, the will install the package called `subversion` from the `nixpkgs` channel (which is, of course, the
[Subversion version management system](http://subversion.tigris.org/)). [Subversion version management system](http://subversion.tigris.org/)).
> **Note** > **Note**
@ -122,7 +126,7 @@ will install the package called `subversion` (which is, of course, the
> binary cache <https://cache.nixos.org>; it contains binaries for most > binary cache <https://cache.nixos.org>; it contains binaries for most
> packages in Nixpkgs. Only if no binary is available in the binary > packages in Nixpkgs. Only if no binary is available in the binary
> cache, Nix will build the package from source. So if `nix-env > cache, Nix will build the package from source. So if `nix-env
> -i subversion` results in Nix building stuff from source, then either > -iA nixpkgs.subversion` results in Nix building stuff from source, then either
> the package is not built for your platform by the Nixpkgs build > the package is not built for your platform by the Nixpkgs build
> servers, or your version of Nixpkgs is too old or too new. For > servers, or your version of Nixpkgs is too old or too new. For
> instance, if you have a very recent checkout of Nixpkgs, then the > instance, if you have a very recent checkout of Nixpkgs, then the
@ -133,7 +137,10 @@ will install the package called `subversion` (which is, of course, the
> using a Git checkout of the Nixpkgs tree), you will get binaries for > using a Git checkout of the Nixpkgs tree), you will get binaries for
> most packages. > most packages.
Naturally, packages can also be uninstalled: Naturally, packages can also be uninstalled. Unlike when installing, you will
need to use the derivation name (though the version part can be omitted),
instead of the attribute path, as `nix-env` does not record which attribute
was used for installing:
```console ```console
$ nix-env -e subversion $ nix-env -e subversion
@ -143,7 +150,7 @@ Upgrading to a new version is just as easy. If you have a new release of
Nix Packages, you can do: Nix Packages, you can do:
```console ```console
$ nix-env -u subversion $ nix-env -uA nixpkgs.subversion
``` ```
This will *only* upgrade Subversion if there is a “newer” version in the This will *only* upgrade Subversion if there is a “newer” version in the

View file

@ -9,7 +9,7 @@ The daemon that handles binary cache requests via HTTP, `nix-serve`, is
not part of the Nix distribution, but you can install it from Nixpkgs: not part of the Nix distribution, but you can install it from Nixpkgs:
```console ```console
$ nix-env -i nix-serve $ nix-env -iA nixpkgs.nix-serve
``` ```
You can then start the server, listening for HTTP connections on You can then start the server, listening for HTTP connections on
@ -35,7 +35,7 @@ On the client side, you can tell Nix to use your binary cache using
`--option extra-binary-caches`, e.g.: `--option extra-binary-caches`, e.g.:
```console ```console
$ nix-env -i firefox --option extra-binary-caches http://avalon:8080/ $ nix-env -iA nixpkgs.firefox --option extra-binary-caches http://avalon:8080/
``` ```
The option `extra-binary-caches` tells Nix to use this binary cache in The option `extra-binary-caches` tells Nix to use this binary cache in

View file

@ -44,7 +44,7 @@ collector as follows:
$ nix-store --gc $ nix-store --gc
``` ```
The behaviour of the gargage collector is affected by the The behaviour of the garbage collector is affected by the
`keep-derivations` (default: true) and `keep-outputs` (default: false) `keep-derivations` (default: true) and `keep-outputs` (default: false)
options in the Nix configuration file. The defaults will ensure that all options in the Nix configuration file. The defaults will ensure that all
derivations that are build-time dependencies of garbage collector roots derivations that are build-time dependencies of garbage collector roots

View file

@ -39,7 +39,7 @@ just Subversion 1.1.2 (arrows in the figure indicate symlinks). This
would be what we would obtain if we had done would be what we would obtain if we had done
```console ```console
$ nix-env -i subversion $ nix-env -iA nixpkgs.subversion
``` ```
on a set of Nix expressions that contained Subversion 1.1.2. on a set of Nix expressions that contained Subversion 1.1.2.
@ -54,7 +54,7 @@ environment is generated based on the current one. For instance,
generation 43 was created from generation 42 when we did generation 43 was created from generation 42 when we did
```console ```console
$ nix-env -i subversion firefox $ nix-env -iA nixpkgs.subversion nixpkgs.firefox
``` ```
on a set of Nix expressions that contained Firefox and a new version of on a set of Nix expressions that contained Firefox and a new version of
@ -127,7 +127,7 @@ All `nix-env` operations work on the profile pointed to by
(abbreviation `-p`): (abbreviation `-p`):
```console ```console
$ nix-env -p /nix/var/nix/profiles/other-profile -i subversion $ nix-env -p /nix/var/nix/profiles/other-profile -iA nixpkgs.subversion
``` ```
This will *not* change the `~/.nix-profile` symlink. This will *not* change the `~/.nix-profile` symlink.

View file

@ -6,7 +6,7 @@ automatically fetching any store paths in Firefoxs closure if they are
available on the server `avalon`: available on the server `avalon`:
```console ```console
$ nix-env -i firefox --substituters ssh://alice@avalon $ nix-env -iA nixpkgs.firefox --substituters ssh://alice@avalon
``` ```
This works similar to the binary cache substituter that Nix usually This works similar to the binary cache substituter that Nix usually

View file

@ -19,19 +19,19 @@ to subsequent chapters.
channel: channel:
```console ```console
$ nix-env -qa $ nix-env -qaP
docbook-xml-4.3 nixpkgs.docbook_xml_dtd_43 docbook-xml-4.3
docbook-xml-4.5 nixpkgs.docbook_xml_dtd_45 docbook-xml-4.5
firefox-33.0.2 nixpkgs.firefox firefox-33.0.2
hello-2.9 nixpkgs.hello hello-2.9
libxslt-1.1.28 nixpkgs.libxslt libxslt-1.1.28
``` ```
1. Install some packages from the channel: 1. Install some packages from the channel:
```console ```console
$ nix-env -i hello $ nix-env -iA nixpkgs.hello
``` ```
This should download pre-built packages; it should not build them This should download pre-built packages; it should not build them

View file

@ -1,4 +1,4 @@
# Release 2.4 (2021-10-XX) # Release 2.4 (2021-11-01)
This is the first release in more than two years and is the result of This is the first release in more than two years and is the result of
more than 2800 commits from 195 contributors since release 2.3. more than 2800 commits from 195 contributors since release 2.3.
@ -276,18 +276,62 @@ more than 2800 commits from 195 contributors since release 2.3.
* Plugins can now register `nix` subcommands. * Plugins can now register `nix` subcommands.
* The `--indirect` flag to `nix-store --add-root` has become a no-op.
`--add-root` will always generate indirect GC roots from now on.
## Incompatible changes ## Incompatible changes
* The `nix` command is now marked as an experimental feature. This * The `nix` command is now marked as an experimental feature. This
means that you need to add means that you need to add
> experimental-features = nix-command ```
experimental-features = nix-command
```
to your `nix.conf` if you want to use it, or pass to your `nix.conf` if you want to use it, or pass
`--extra-experimental-features nix-command` on the command line. `--extra-experimental-features nix-command` on the command line.
* The old `nix run` has been renamed to `nix shell` (and there is a * The `nix` command no longer has a syntax for referring to packages
new `nix run` that does something else, as described above). in a channel. This means that the following no longer works:
```console
nix build nixpkgs.hello # Nix 2.3
```
Instead, you can either use the `#` syntax to select a package from
a flake, e.g.
```console
nix build nixpkgs#hello
```
Or, if you want to use the `nixpkgs` channel in the `NIX_PATH`
environment variable:
```console
nix build -f '<nixpkgs>' hello
```
* The old `nix run` has been renamed to `nix shell`, while there is a
new `nix run` that runs a default command. So instead of
```console
nix run nixpkgs.hello -c hello # Nix 2.3
```
you should use
```console
nix shell nixpkgs#hello -c hello
```
or just
```console
nix run nixpkgs#hello
```
if the command you want to run has the same name as the package.
* It is now an error to modify the `plugin-files` setting via a * It is now an error to modify the `plugin-files` setting via a
command-line flag that appears after the first non-flag argument to command-line flag that appears after the first non-flag argument to
@ -354,6 +398,7 @@ dramforever,
Dustin DeWeese, Dustin DeWeese,
edef, edef,
Eelco Dolstra, Eelco Dolstra,
Ellie Hermaszewska,
Emilio Karakey, Emilio Karakey,
Emily, Emily,
Eric Culp, Eric Culp,
@ -364,7 +409,7 @@ Federico Pellegrin,
Finn Behrens, Finn Behrens,
Florian Franzen, Florian Franzen,
Félix Baylac-Jacqué, Félix Baylac-Jacqué,
Gabriel Gonzalez, Gabriella Gonzalez,
Geoff Reedy, Geoff Reedy,
Georges Dubus, Georges Dubus,
Graham Christensen, Graham Christensen,
@ -387,7 +432,6 @@ Jaroslavas Pocepko,
Jarrett Keifer, Jarrett Keifer,
Jeremy Schlatter, Jeremy Schlatter,
Joachim Breitner, Joachim Breitner,
Joe Hermaszewski,
Joe Pea, Joe Pea,
John Ericson, John Ericson,
Jonathan Ringer, Jonathan Ringer,

View file

@ -1,2 +1,16 @@
# Release 2.5 (2021-XX-XX) # Release 2.5 (2021-12-13)
* The garbage collector no longer blocks new builds, so the message
`waiting for the big garbage collector lock...` is a thing of the
past.
* Binary cache stores now have a setting `compression-level`.
* `nix develop` now has a flag `--unpack` to run `unpackPhase`.
* Lists can now be compared lexicographically using the `<` operator.
* New built-in function: `builtins.groupBy`, with the same functionality as
Nixpkgs' `lib.groupBy`, but faster (a short sketch follows this list).
* `nix repl` now has a `:log` command.
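A minimal sketch of the two new language features above, using made-up data (paste it into `nix repl`, for instance):

```nix
let
  # Made-up example data.
  drvs = [
    { name = "hello";   system = "x86_64-linux";  }
    { name = "firefox"; system = "x86_64-linux";  }
    { name = "hello";   system = "aarch64-linux"; }
  ];
in {
  # builtins.groupBy keys each element by the string returned from the
  # grouping function, yielding an attribute set of lists, here
  # { aarch64-linux = [ ... ]; x86_64-linux = [ ... ]; }.
  bySystem = builtins.groupBy (d: d.system) drvs;

  # Lists now compare lexicographically with `<`.
  ordered = [ 1 2 3 ] < [ 1 3 ];  # true
}
```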

View file

@ -0,0 +1,21 @@
# Release 2.6 (2022-01-24)
* The Nix CLI now searches for a `flake.nix` up until the root of the current
Git repository or a filesystem boundary rather than just in the current
directory.
* The TOML parser used by `builtins.fromTOML` has been replaced by [a
more compliant one](https://github.com/ToruNiina/toml11).
* Added `:st`/`:show-trace` commands to `nix repl`, which are used to
set or toggle display of error traces.
* New builtin function `builtins.zipAttrsWith` with the same
functionality as `lib.zipAttrsWith` from Nixpkgs, but much more
efficient; a short sketch follows this list.
* New command `nix store copy-log` to copy build logs from one store
to another.
* The `commit-lockfile-summary` option can be set to a non-empty
string to override the commit summary used when committing an updated
lockfile. This may be used in conjunction with the `nixConfig`
attribute in `flake.nix` to better conform to repository
conventions.
* `docker run -ti nixos/nix:master` will place you in the Docker
container with the latest version of Nix from the `master` branch.
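As a rough sketch, `builtins.zipAttrsWith` merges a list of attribute sets by calling the given function once per attribute name with all values found for that name (the attribute sets below are made up):

```nix
builtins.zipAttrsWith (name: values: values) [
  { out = "a"; dev = "b"; }
  { out = "c"; }
]
# => { dev = [ "b" ]; out = [ "a" "c" ]; }
```

And a hypothetical `flake.nix` fragment setting `commit-lockfile-summary` through `nixConfig` (the summary string is arbitrary); it is picked up when the lock file is committed, e.g. by `nix flake update --commit-lock-file`:

```nix
{
  description = "example flake";
  nixConfig.commit-lockfile-summary = "chore: update flake.lock";
  outputs = { self }: { };
}
```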

View file

@ -0,0 +1,33 @@
# Release X.Y (2022-03-07)
* Nix will now make some helpful suggestions when you mistype
something on the command line. For instance, if you type `nix build
nixpkgs#thunderbrd`, it will suggest `thunderbird`.
* A number of "default" flake output attributes have been
renamed. These are:
* `defaultPackage.<system>``packages.<system>.default`
* `defaultApps.<system>``apps.<system>.default`
* `defaultTemplate``templates.default`
* `defaultBundler.<system>``bundlers.<system>.default`
* `overlay``overlays.default`
* `devShell.<system>``devShells.<system>.default`
The old flake output attributes still work, but `nix flake check`
will warn about them (the sketch after this list uses the new names).
* Breaking API change: `nix bundle` now supports bundlers of the form
`bundler.<system>.<name> = derivation: another-derivation;`. This
supports additional functionality to inspect evaluation information
during bundling. A new
[repository](https://github.com/NixOS/bundlers) has various bundlers
implemented.
* `nix store ping` now reports the version of the remote Nix daemon.
* `nix flake {init,new}` now display information about which files have been
created.
* Templates can now define a `welcomeText` attribute, which is printed out by
`nix flake {init,new} --template <template>`.
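A hypothetical `flake.nix` fragment written against the renamed output attributes (the system, the packages, and the `./template` path are made up for illustration):

```nix
{
  outputs = { self, nixpkgs }: {
    # New-style names; the old `defaultPackage.<system>` and
    # `devShell.<system>` still evaluate, but `nix flake check` warns.
    packages.x86_64-linux.default =
      nixpkgs.legacyPackages.x86_64-linux.hello;
    devShells.x86_64-linux.default =
      nixpkgs.legacyPackages.x86_64-linux.mkShell { };

    # A template carrying the new welcomeText attribute.
    templates.default = {
      path = ./template;
      description = "an example project template";
      welcomeText = "Run `nix develop` to get started.";
    };
  };
}
```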

View file

@ -0,0 +1 @@
# Release X.Y (202?-??-??)

264 docker.nix Normal file
View file

@ -0,0 +1,264 @@
{ pkgs ? import <nixpkgs> { }
, lib ? pkgs.lib
, name ? "nix"
, tag ? "latest"
, channelName ? "nixpkgs"
, channelURL ? "https://nixos.org/channels/nixpkgs-unstable"
}:
let
defaultPkgs = with pkgs; [
nix
bashInteractive
coreutils-full
gnutar
gzip
gnugrep
which
curl
less
wget
man
cacert.out
findutils
iana-etc
git
];
users = {
root = {
uid = 0;
shell = "/bin/bash";
home = "/root";
gid = 0;
};
} // lib.listToAttrs (
map
(
n: {
name = "nixbld${toString n}";
value = {
uid = 30000 + n;
gid = 30000;
groups = [ "nixbld" ];
description = "Nix build user ${toString n}";
};
}
)
(lib.lists.range 1 32)
);
groups = {
root.gid = 0;
nixbld.gid = 30000;
};
userToPasswd = (
k:
{ uid
, gid ? 65534
, home ? "/var/empty"
, description ? ""
, shell ? "/bin/false"
, groups ? [ ]
}: "${k}:x:${toString uid}:${toString gid}:${description}:${home}:${shell}"
);
passwdContents = (
lib.concatStringsSep "\n"
(lib.attrValues (lib.mapAttrs userToPasswd users))
);
userToShadow = k: { ... }: "${k}:!:1::::::";
shadowContents = (
lib.concatStringsSep "\n"
(lib.attrValues (lib.mapAttrs userToShadow users))
);
# Map groups to members
# {
# group = [ "user1" "user2" ];
# }
groupMemberMap = (
let
# Create a flat list of user/group mappings
mappings = (
builtins.foldl'
(
acc: user:
let
groups = users.${user}.groups or [ ];
in
acc ++ map
(group: {
inherit user group;
})
groups
)
[ ]
(lib.attrNames users)
);
in
(
builtins.foldl'
(
acc: v: acc // {
${v.group} = acc.${v.group} or [ ] ++ [ v.user ];
}
)
{ }
mappings)
);
groupToGroup = k: { gid }:
let
members = groupMemberMap.${k} or [ ];
in
"${k}:x:${toString gid}:${lib.concatStringsSep "," members}";
groupContents = (
lib.concatStringsSep "\n"
(lib.attrValues (lib.mapAttrs groupToGroup groups))
);
nixConf = {
sandbox = "false";
build-users-group = "nixbld";
trusted-public-keys = "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=";
};
nixConfContents = (lib.concatStringsSep "\n" (lib.mapAttrsFlatten (n: v: "${n} = ${v}") nixConf)) + "\n";
baseSystem =
let
nixpkgs = pkgs.path;
channel = pkgs.runCommand "channel-nixos" { } ''
mkdir $out
ln -s ${nixpkgs} $out/nixpkgs
echo "[]" > $out/manifest.nix
'';
rootEnv = pkgs.buildPackages.buildEnv {
name = "root-profile-env";
paths = defaultPkgs;
};
manifest = pkgs.buildPackages.runCommand "manifest.nix" { } ''
cat > $out <<EOF
[
${lib.concatStringsSep "\n" (builtins.map (drv: let
outputs = drv.outputsToInstall or [ "out" ];
in ''
{
${lib.concatStringsSep "\n" (builtins.map (output: ''
${output} = { outPath = "${lib.getOutput output drv}"; };
'') outputs)}
outputs = [ ${lib.concatStringsSep " " (builtins.map (x: "\"${x}\"") outputs)} ];
name = "${drv.name}";
outPath = "${drv}";
system = "${drv.system}";
type = "derivation";
meta = { };
}
'') defaultPkgs)}
]
EOF
'';
profile = pkgs.buildPackages.runCommand "user-environment" { } ''
mkdir $out
cp -a ${rootEnv}/* $out/
ln -s ${manifest} $out/manifest.nix
'';
in
pkgs.runCommand "base-system"
{
inherit passwdContents groupContents shadowContents nixConfContents;
passAsFile = [
"passwdContents"
"groupContents"
"shadowContents"
"nixConfContents"
];
allowSubstitutes = false;
preferLocalBuild = true;
} ''
env
set -x
mkdir -p $out/etc
mkdir -p $out/etc/ssl/certs
ln -s /nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt $out/etc/ssl/certs
cat $passwdContentsPath > $out/etc/passwd
echo "" >> $out/etc/passwd
cat $groupContentsPath > $out/etc/group
echo "" >> $out/etc/group
cat $shadowContentsPath > $out/etc/shadow
echo "" >> $out/etc/shadow
mkdir -p $out/usr
ln -s /nix/var/nix/profiles/share $out/usr/
mkdir -p $out/nix/var/nix/gcroots
mkdir $out/tmp
mkdir -p $out/var/tmp
mkdir -p $out/etc/nix
cat $nixConfContentsPath > $out/etc/nix/nix.conf
mkdir -p $out/root
mkdir -p $out/nix/var/nix/profiles/per-user/root
ln -s ${profile} $out/nix/var/nix/profiles/default-1-link
ln -s $out/nix/var/nix/profiles/default-1-link $out/nix/var/nix/profiles/default
ln -s /nix/var/nix/profiles/default $out/root/.nix-profile
ln -s ${channel} $out/nix/var/nix/profiles/per-user/root/channels-1-link
ln -s $out/nix/var/nix/profiles/per-user/root/channels-1-link $out/nix/var/nix/profiles/per-user/root/channels
mkdir -p $out/root/.nix-defexpr
ln -s $out/nix/var/nix/profiles/per-user/root/channels $out/root/.nix-defexpr/channels
echo "${channelURL} ${channelName}" > $out/root/.nix-channels
mkdir -p $out/bin $out/usr/bin
ln -s ${pkgs.coreutils}/bin/env $out/usr/bin/env
ln -s ${pkgs.bashInteractive}/bin/bash $out/bin/sh
'';
in
pkgs.dockerTools.buildLayeredImageWithNixDb {
inherit name tag;
contents = [ baseSystem ];
extraCommands = ''
rm -rf nix-support
ln -s /nix/var/nix/profiles nix/var/nix/gcroots/profiles
'';
fakeRootCommands = ''
chmod 1777 tmp
chmod 1777 var/tmp
'';
config = {
Cmd = [ "/root/.nix-profile/bin/bash" ];
Env = [
"USER=root"
"PATH=${lib.concatStringsSep ":" [
"/root/.nix-profile/bin"
"/nix/var/nix/profiles/default/bin"
"/nix/var/nix/profiles/default/sbin"
]}"
"MANPATH=${lib.concatStringsSep ":" [
"/root/.nix-profile/share/man"
"/nix/var/nix/profiles/default/share/man"
]}"
"SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
"GIT_SSL_CAINFO=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
"NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
"NIX_PATH=/nix/var/nix/profiles/per-user/root/channels:/root/.nix-defexpr/channels"
];
};
}
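As a usage sketch, the expression above can be imported with custom arguments, which is also how `flake.nix` consumes it later in this commit; the tag below is made up, and the resulting tarball can be loaded with `docker load`:

```nix
# Saved to a file (e.g. image.nix, a made-up name) this builds with
# `nix-build image.nix`; load the result with `docker load < result`.
import ./docker.nix {
  pkgs = import <nixpkgs> { };
  tag = "2.6.0";                                  # illustrative tag
  channelName = "nixpkgs";
  channelURL = "https://nixos.org/channels/nixpkgs-unstable";
}
```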

View file

@ -31,10 +31,26 @@
"type": "indirect" "type": "indirect"
} }
}, },
"nixpkgs-regression": {
"locked": {
"lastModified": 1643052045,
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
},
"original": {
"id": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "indirect"
}
},
"root": { "root": {
"inputs": { "inputs": {
"lowdown-src": "lowdown-src", "lowdown-src": "lowdown-src",
"nixpkgs": "nixpkgs" "nixpkgs": "nixpkgs",
"nixpkgs-regression": "nixpkgs-regression"
} }
} }
}, },

168 flake.nix
View file

@ -2,9 +2,10 @@
description = "The purely functional package manager"; description = "The purely functional package manager";
inputs.nixpkgs.url = "nixpkgs/nixos-21.05-small"; inputs.nixpkgs.url = "nixpkgs/nixos-21.05-small";
inputs.nixpkgs-regression.url = "nixpkgs/215d4d0fd80ca5163643b03a33fde804a29cc1e2";
inputs.lowdown-src = { url = "github:kristapsdz/lowdown"; flake = false; }; inputs.lowdown-src = { url = "github:kristapsdz/lowdown"; flake = false; };
outputs = { self, nixpkgs, lowdown-src }: outputs = { self, nixpkgs, nixpkgs-regression, lowdown-src }:
let let
@ -22,15 +23,36 @@
crossSystems = [ "armv6l-linux" "armv7l-linux" ]; crossSystems = [ "armv6l-linux" "armv7l-linux" ];
stdenvs = [ "gccStdenv" "clangStdenv" "clang11Stdenv" "stdenv" ];
forAllSystems = f: nixpkgs.lib.genAttrs systems (system: f system); forAllSystems = f: nixpkgs.lib.genAttrs systems (system: f system);
forAllSystemsAndStdenvs = f: forAllSystems (system:
nixpkgs.lib.listToAttrs
(map
(n:
nixpkgs.lib.nameValuePair "${n}Packages" (
f system n
)) stdenvs
)
);
forAllStdenvs = stdenvs: f: nixpkgs.lib.genAttrs stdenvs (stdenv: f stdenv);
# Memoize nixpkgs for different platforms for efficiency. # Memoize nixpkgs for different platforms for efficiency.
nixpkgsFor = forAllSystems (system: nixpkgsFor =
let stdenvsPackages = forAllSystemsAndStdenvs
(system: stdenv:
import nixpkgs { import nixpkgs {
inherit system; inherit system;
overlays = [ self.overlay ]; overlays = [
(overlayFor (p: p.${stdenv}))
];
} }
); );
in
# Add the `stdenvPackages` at toplevel, both because these are the ones
# we want most of the time and for backwards compatibility
forAllSystems (system: stdenvsPackages.${system} // stdenvsPackages.${system}.stdenvPackages);
commonDeps = pkgs: with pkgs; rec { commonDeps = pkgs: with pkgs; rec {
# Use "busybox-sandbox-shell" if present, # Use "busybox-sandbox-shell" if present,
@ -61,6 +83,7 @@
configureFlags = configureFlags =
lib.optionals stdenv.isLinux [ lib.optionals stdenv.isLinux [
"--with-boost=${boost}/lib"
"--with-sandbox-shell=${sh}/bin/busybox" "--with-sandbox-shell=${sh}/bin/busybox"
"LDFLAGS=-fuse-ld=gold" "LDFLAGS=-fuse-ld=gold"
]; ];
@ -74,7 +97,7 @@
buildPackages.mdbook buildPackages.mdbook
buildPackages.autoconf-archive buildPackages.autoconf-archive
buildPackages.autoreconfHook buildPackages.autoreconfHook
buildPackages.pkgconfig buildPackages.pkg-config
# Tests # Tests
buildPackages.git buildPackages.git
@ -90,7 +113,7 @@
libarchive libarchive
boost boost
lowdown-nix lowdown-nix
gmock gtest
] ]
++ lib.optionals stdenv.isLinux [libseccomp] ++ lib.optionals stdenv.isLinux [libseccomp]
++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
@ -110,6 +133,7 @@
./boehmgc-coroutine-sp-fallback.diff ./boehmgc-coroutine-sp-fallback.diff
]; ];
})) }))
nlohmann_json
]; ];
perlDeps = perlDeps =
@ -183,7 +207,8 @@
installCheckPhase = "make installcheck -j$NIX_BUILD_CORES -l$NIX_BUILD_CORES"; installCheckPhase = "make installcheck -j$NIX_BUILD_CORES -l$NIX_BUILD_CORES";
}; };
binaryTarball = buildPackages: nix: pkgs: let binaryTarball = buildPackages: nix: pkgs:
let
inherit (pkgs) cacert; inherit (pkgs) cacert;
installerClosureInfo = buildPackages.closureInfo { rootPaths = [ nix cacert ]; }; installerClosureInfo = buildPackages.closureInfo { rootPaths = [ nix cacert ]; };
in in
@ -254,18 +279,15 @@
$(cat ${installerClosureInfo}/store-paths) $(cat ${installerClosureInfo}/store-paths)
''; '';
in { overlayFor = getStdenv: final: prev:
let currentStdenv = getStdenv final; in
# A Nixpkgs overlay that overrides the 'nix' and {
# 'nix.perl-bindings' packages.
overlay = final: prev: {
nixStable = prev.nix; nixStable = prev.nix;
# Forward from the previous stage as we dont want it to pick the lowdown override # Forward from the previous stage as we dont want it to pick the lowdown override
nixUnstable = prev.nixUnstable; nixUnstable = prev.nixUnstable;
nix = with final; with commonDeps pkgs; stdenv.mkDerivation { nix = with final; with commonDeps pkgs; currentStdenv.mkDerivation {
name = "nix-${version}"; name = "nix-${version}";
inherit version; inherit version;
@ -280,6 +302,8 @@
propagatedBuildInputs = propagatedDeps; propagatedBuildInputs = propagatedDeps;
disallowedReferences = [ boost ];
preConfigure = preConfigure =
'' ''
# Copy libboost_context so we don't get all of Boost in our closure. # Copy libboost_context so we don't get all of Boost in our closure.
@ -287,9 +311,16 @@
mkdir -p $out/lib mkdir -p $out/lib
cp -pd ${boost}/lib/{libboost_context*,libboost_thread*,libboost_system*} $out/lib cp -pd ${boost}/lib/{libboost_context*,libboost_thread*,libboost_system*} $out/lib
rm -f $out/lib/*.a rm -f $out/lib/*.a
${lib.optionalString stdenv.isLinux '' ${lib.optionalString currentStdenv.isLinux ''
chmod u+w $out/lib/*.so.* chmod u+w $out/lib/*.so.*
patchelf --set-rpath $out/lib:${stdenv.cc.cc.lib}/lib $out/lib/libboost_thread.so.* patchelf --set-rpath $out/lib:${currentStdenv.cc.cc.lib}/lib $out/lib/libboost_thread.so.*
''}
${lib.optionalString currentStdenv.isDarwin ''
for LIB in $out/lib/*.dylib; do
chmod u+w $LIB
install_name_tool -id $LIB $LIB
done
install_name_tool -change ${boost}/lib/libboost_system.dylib $out/lib/libboost_system.dylib $out/lib/libboost_thread.dylib
''} ''}
''; '';
@ -307,6 +338,12 @@
postInstall = '' postInstall = ''
mkdir -p $doc/nix-support mkdir -p $doc/nix-support
echo "doc manual $doc/share/doc/nix/manual" >> $doc/nix-support/hydra-build-products echo "doc manual $doc/share/doc/nix/manual" >> $doc/nix-support/hydra-build-products
${lib.optionalString currentStdenv.isDarwin ''
install_name_tool \
-change ${boost}/lib/libboost_context.dylib \
$out/lib/libboost_context.dylib \
$out/lib/libnixutil.dylib
''}
''; '';
doInstallCheck = true; doInstallCheck = true;
@ -316,7 +353,7 @@
strictDeps = true; strictDeps = true;
passthru.perl-bindings = with final; stdenv.mkDerivation { passthru.perl-bindings = with final; currentStdenv.mkDerivation {
name = "nix-perl-${version}"; name = "nix-perl-${version}";
src = self; src = self;
@ -324,7 +361,7 @@
nativeBuildInputs = nativeBuildInputs =
[ buildPackages.autoconf-archive [ buildPackages.autoconf-archive
buildPackages.autoreconfHook buildPackages.autoreconfHook
buildPackages.pkgconfig buildPackages.pkg-config
]; ];
buildInputs = buildInputs =
@ -335,8 +372,8 @@
pkgs.perl pkgs.perl
boost boost
] ]
++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium ++ lib.optional (currentStdenv.isLinux || currentStdenv.isDarwin) libsodium
++ lib.optional stdenv.isDarwin darwin.apple_sdk.frameworks.Security; ++ lib.optional currentStdenv.isDarwin darwin.apple_sdk.frameworks.Security;
configureFlags = '' configureFlags = ''
--with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix} --with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix}
@ -350,7 +387,7 @@
}; };
lowdown-nix = with final; stdenv.mkDerivation rec { lowdown-nix = with final; currentStdenv.mkDerivation rec {
name = "lowdown-0.9.0"; name = "lowdown-0.9.0";
src = lowdown-src; src = lowdown-src;
@ -360,15 +397,20 @@
nativeBuildInputs = [ buildPackages.which ]; nativeBuildInputs = [ buildPackages.which ];
configurePhase = '' configurePhase = ''
${if (stdenv.isDarwin && stdenv.isAarch64) then "echo \"HAVE_SANDBOX_INIT=false\" > configure.local" else ""} ${if (currentStdenv.isDarwin && currentStdenv.isAarch64) then "echo \"HAVE_SANDBOX_INIT=false\" > configure.local" else ""}
./configure \ ./configure \
PREFIX=${placeholder "dev"} \ PREFIX=${placeholder "dev"} \
BINDIR=${placeholder "bin"}/bin BINDIR=${placeholder "bin"}/bin
''; '';
}; };
}; };
in {
# A Nixpkgs overlay that overrides the 'nix' and
# 'nix.perl-bindings' packages.
overlay = overlayFor (p: p.stdenv);
hydraJobs = { hydraJobs = {
# Binary package for various platforms. # Binary package for various platforms.
@ -404,6 +446,9 @@
installerScript = installScriptFor [ "x86_64-linux" "i686-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin" "armv6l-linux" "armv7l-linux" ]; installerScript = installScriptFor [ "x86_64-linux" "i686-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin" "armv6l-linux" "armv7l-linux" ];
installerScriptForGHA = installScriptFor [ "x86_64-linux" "x86_64-darwin" "armv6l-linux" "armv7l-linux"]; installerScriptForGHA = installScriptFor [ "x86_64-linux" "x86_64-darwin" "armv6l-linux" "armv7l-linux"];
# docker image with Nix inside
dockerImage = nixpkgs.lib.genAttrs linux64BitSystems (system: self.packages.${system}.dockerImage);
# Line coverage analysis. # Line coverage analysis.
coverage = coverage =
with nixpkgsFor.x86_64-linux; with nixpkgsFor.x86_64-linux;
@ -456,6 +501,12 @@
inherit (self) overlay; inherit (self) overlay;
}); });
tests.sourcehutFlakes = (import ./tests/sourcehut-flakes.nix rec {
system = "x86_64-linux";
inherit nixpkgs;
inherit (self) overlay;
});
tests.setuid = nixpkgs.lib.genAttrs tests.setuid = nixpkgs.lib.genAttrs
["i686-linux" "x86_64-linux"] ["i686-linux" "x86_64-linux"]
(system: (system:
@ -464,36 +515,25 @@
inherit (self) overlay; inherit (self) overlay;
}); });
/* # Make sure that nix-env still produces the exact same result
# Check whether we can still evaluate all of Nixpkgs. # on a particular version of Nixpkgs.
tests.evalNixpkgs = tests.evalNixpkgs =
import (nixpkgs + "/pkgs/top-level/make-tarball.nix") {
# FIXME: fix pkgs/top-level/make-tarball.nix in NixOS to not require a revCount.
inherit nixpkgs;
pkgs = nixpkgsFor.x86_64-linux;
officialRelease = false;
};
# Check whether we can still evaluate NixOS.
tests.evalNixOS =
with nixpkgsFor.x86_64-linux; with nixpkgsFor.x86_64-linux;
runCommand "eval-nixos" { buildInputs = [ nix ]; } runCommand "eval-nixos" { buildInputs = [ nix ]; }
'' ''
export NIX_STATE_DIR=$TMPDIR type -p nix-env
# Note: we're filtering out nixos-install-tools because https://github.com/NixOS/nixpkgs/pull/153594#issuecomment-1020530593.
nix-instantiate ${nixpkgs}/nixos/release-combined.nix -A tested --dry-run \ time nix-env --store dummy:// -f ${nixpkgs-regression} -qaP --drv-path | sort | grep -v nixos-install-tools > packages
--arg nixpkgs '{ outPath = ${nixpkgs}; revCount = 123; shortRev = "abcdefgh"; }' [[ $(sha1sum < packages | cut -c1-40) = ff451c521e61e4fe72bdbe2d0ca5d1809affa733 ]]
mkdir $out
touch $out
''; '';
*/
metrics.nixpkgs = import "${nixpkgs-regression}/pkgs/top-level/metrics.nix" {
pkgs = nixpkgsFor.x86_64-linux;
nixpkgs = nixpkgs-regression;
}; };
checks = forAllSystems (system: { installTests = forAllSystems (system:
binaryTarball = self.hydraJobs.binaryTarball.${system};
perlBindings = self.hydraJobs.perlBindings.${system};
installTests =
let pkgs = nixpkgsFor.${system}; in let pkgs = nixpkgsFor.${system}; in
pkgs.runCommand "install-tests" { pkgs.runCommand "install-tests" {
againstSelf = testNixVersions pkgs pkgs.nix pkgs.pkgs.nix; againstSelf = testNixVersions pkgs pkgs.nix pkgs.pkgs.nix;
@ -505,7 +545,16 @@
# Disabled because the latest stable version doesn't handle # Disabled because the latest stable version doesn't handle
# `NIX_DAEMON_SOCKET_PATH` which is required for the tests to work # `NIX_DAEMON_SOCKET_PATH` which is required for the tests to work
# againstLatestStable = testNixVersions pkgs pkgs.nix pkgs.nixStable; # againstLatestStable = testNixVersions pkgs pkgs.nix pkgs.nixStable;
} "touch $out"; } "touch $out");
};
checks = forAllSystems (system: {
binaryTarball = self.hydraJobs.binaryTarball.${system};
perlBindings = self.hydraJobs.perlBindings.${system};
installTests = self.hydraJobs.installTests.${system};
} // (nixpkgs.lib.optionalAttrs (builtins.elem system linux64BitSystems)) {
dockerImage = self.hydraJobs.dockerImage.${system};
}); });
packages = forAllSystems (system: { packages = forAllSystems (system: {
@ -551,6 +600,20 @@
hardeningDisable = [ "pie" ]; hardeningDisable = [ "pie" ];
}; };
dockerImage =
let
pkgs = nixpkgsFor.${system};
image = import ./docker.nix { inherit pkgs; tag = version; };
in
pkgs.runCommand
"docker-image-tarball-${version}"
{ meta.description = "Docker image with Nix for ${system}"; }
''
mkdir -p $out/nix-support
image=$out/image.tar.gz
ln -s ${image} $image
echo "file binary-dist $image" >> $out/nix-support/hydra-build-products
'';
} // builtins.listToAttrs (map (crossSystem: { } // builtins.listToAttrs (map (crossSystem: {
name = "nix-${crossSystem}"; name = "nix-${crossSystem}";
value = let value = let
@ -590,15 +653,21 @@
doInstallCheck = true; doInstallCheck = true;
installCheckFlags = "sysconfdir=$(out)/etc"; installCheckFlags = "sysconfdir=$(out)/etc";
}; };
}) crossSystems))); }) crossSystems)) // (builtins.listToAttrs (map (stdenvName:
nixpkgsFor.${system}.lib.nameValuePair
"nix-${stdenvName}"
nixpkgsFor.${system}."${stdenvName}Packages".nix
) stdenvs)));
defaultPackage = forAllSystems (system: self.packages.${system}.nix); defaultPackage = forAllSystems (system: self.packages.${system}.nix);
devShell = forAllSystems (system: devShell = forAllSystems (system: self.devShells.${system}.stdenvPackages);
devShells = forAllSystemsAndStdenvs (system: stdenv:
with nixpkgsFor.${system}; with nixpkgsFor.${system};
with commonDeps pkgs; with commonDeps pkgs;
stdenv.mkDerivation { nixpkgsFor.${system}.${stdenv}.mkDerivation {
name = "nix"; name = "nix";
outputs = [ "out" "dev" "doc" ]; outputs = [ "out" "dev" "doc" ];
@ -617,6 +686,9 @@
PATH=$prefix/bin:$PATH PATH=$prefix/bin:$PATH
unset PYTHONPATH unset PYTHONPATH
export MANPATH=$out/share/man:$MANPATH export MANPATH=$out/share/man:$MANPATH
# Make bash completion work.
XDG_DATA_DIRS+=:$out/share
''; '';
}); });

View file

@ -12,7 +12,6 @@ use LWP::UserAgent;
use Net::Amazon::S3; use Net::Amazon::S3;
my $evalId = $ARGV[0] or die "Usage: $0 EVAL-ID\n"; my $evalId = $ARGV[0] or die "Usage: $0 EVAL-ID\n";
my $nixRev = $ARGV[1] or die; # FIXME
my $releasesBucketName = "nix-releases"; my $releasesBucketName = "nix-releases";
my $channelsBucketName = "nix-channels"; my $channelsBucketName = "nix-channels";
@ -36,10 +35,11 @@ sub fetch {
} }
my $evalUrl = "https://hydra.nixos.org/eval/$evalId"; my $evalUrl = "https://hydra.nixos.org/eval/$evalId";
#my $evalInfo = decode_json(fetch($evalUrl, 'application/json')); my $evalInfo = decode_json(fetch($evalUrl, 'application/json'));
#print Dumper($evalInfo); #print Dumper($evalInfo);
my $flakeUrl = $evalInfo->{flake} or die;
#my $nixRev = $evalInfo->{jobsetevalinputs}->{nix}->{revision} or die; my $flakeInfo = decode_json(`nix flake metadata --json "$flakeUrl"` or die);
my $nixRev = $flakeInfo->{revision} or die;
my $buildInfo = decode_json(fetch("$evalUrl/job/build.x86_64-linux", 'application/json')); my $buildInfo = decode_json(fetch("$evalUrl/job/build.x86_64-linux", 'application/json'));
#print Dumper($buildInfo); #print Dumper($buildInfo);
@ -48,13 +48,18 @@ my $releaseName = $buildInfo->{nixname};
$releaseName =~ /nix-(.*)$/ or die; $releaseName =~ /nix-(.*)$/ or die;
my $version = $1; my $version = $1;
print STDERR "Nix revision is $nixRev, version is $version\n"; print STDERR "Flake URL is $flakeUrl, Nix revision is $nixRev, version is $version\n";
my $releaseDir = "nix/$releaseName"; my $releaseDir = "nix/$releaseName";
my $tmpDir = "$TMPDIR/nix-release/$releaseName"; my $tmpDir = "$TMPDIR/nix-release/$releaseName";
File::Path::make_path($tmpDir); File::Path::make_path($tmpDir);
my $narCache = "$TMPDIR/nar-cache";
File::Path::make_path($narCache);
my $binaryCache = "https://cache.nixos.org/?local-nar-cache=$narCache";
# S3 setup. # S3 setup.
my $aws_access_key_id = $ENV{'AWS_ACCESS_KEY_ID'} or die "No AWS_ACCESS_KEY_ID given."; my $aws_access_key_id = $ENV{'AWS_ACCESS_KEY_ID'} or die "No AWS_ACCESS_KEY_ID given.";
my $aws_secret_access_key = $ENV{'AWS_SECRET_ACCESS_KEY'} or die "No AWS_SECRET_ACCESS_KEY given."; my $aws_secret_access_key = $ENV{'AWS_SECRET_ACCESS_KEY'} or die "No AWS_SECRET_ACCESS_KEY given.";
@ -80,6 +85,7 @@ sub downloadFile {
my ($jobName, $productNr, $dstName) = @_; my ($jobName, $productNr, $dstName) = @_;
my $buildInfo = decode_json(fetch("$evalUrl/job/$jobName", 'application/json')); my $buildInfo = decode_json(fetch("$evalUrl/job/$jobName", 'application/json'));
#print STDERR "$jobName: ", Dumper($buildInfo), "\n";
my $srcFile = $buildInfo->{buildproducts}->{$productNr}->{path} or die "job '$jobName' lacks product $productNr\n"; my $srcFile = $buildInfo->{buildproducts}->{$productNr}->{path} or die "job '$jobName' lacks product $productNr\n";
$dstName //= basename($srcFile); $dstName //= basename($srcFile);
@ -87,19 +93,27 @@ sub downloadFile {
if (!-e $tmpFile) { if (!-e $tmpFile) {
print STDERR "downloading $srcFile to $tmpFile...\n"; print STDERR "downloading $srcFile to $tmpFile...\n";
system("NIX_REMOTE=https://cache.nixos.org/ nix store cat '$srcFile' > '$tmpFile'") == 0
my $fileInfo = decode_json(`NIX_REMOTE=$binaryCache nix store ls --json '$srcFile'`);
$srcFile = $fileInfo->{target} if $fileInfo->{type} eq 'symlink';
#print STDERR $srcFile, " ", Dumper($fileInfo), "\n";
system("NIX_REMOTE=$binaryCache nix store cat '$srcFile' > '$tmpFile'.tmp") == 0
or die "unable to fetch $srcFile\n"; or die "unable to fetch $srcFile\n";
rename("$tmpFile.tmp", $tmpFile) or die;
} }
my $sha256_expected = $buildInfo->{buildproducts}->{$productNr}->{sha256hash} or die; my $sha256_expected = $buildInfo->{buildproducts}->{$productNr}->{sha256hash};
my $sha256_actual = `nix hash file --base16 --type sha256 '$tmpFile'`; my $sha256_actual = `nix hash file --base16 --type sha256 '$tmpFile'`;
chomp $sha256_actual; chomp $sha256_actual;
if ($sha256_expected ne $sha256_actual) { if (defined($sha256_expected) && $sha256_expected ne $sha256_actual) {
print STDERR "file $tmpFile is corrupt, got $sha256_actual, expected $sha256_expected\n"; print STDERR "file $tmpFile is corrupt, got $sha256_actual, expected $sha256_expected\n";
exit 1; exit 1;
} }
write_file("$tmpFile.sha256", $sha256_expected); write_file("$tmpFile.sha256", $sha256_actual);
if (! -e "$tmpFile.asc") { if (! -e "$tmpFile.asc") {
system("gpg2 --detach-sign --armor $tmpFile") == 0 or die "unable to sign $tmpFile\n"; system("gpg2 --detach-sign --armor $tmpFile") == 0 or die "unable to sign $tmpFile\n";
@ -117,6 +131,60 @@ downloadFile("binaryTarballCross.x86_64-linux.armv6l-linux", "1");
downloadFile("binaryTarballCross.x86_64-linux.armv7l-linux", "1"); downloadFile("binaryTarballCross.x86_64-linux.armv7l-linux", "1");
downloadFile("installerScript", "1"); downloadFile("installerScript", "1");
# Upload docker images to dockerhub.
my $dockerManifest = "";
my $dockerManifestLatest = "";
for my $platforms (["x86_64-linux", "amd64"], ["aarch64-linux", "arm64"]) {
my $system = $platforms->[0];
my $dockerPlatform = $platforms->[1];
my $fn = "nix-$version-docker-image-$dockerPlatform.tar.gz";
downloadFile("dockerImage.$system", "1", $fn);
print STDERR "loading docker image for $dockerPlatform...\n";
system("docker load -i $tmpDir/$fn") == 0 or die;
my $tag = "nixos/nix:$version-$dockerPlatform";
my $latestTag = "nixos/nix:latest-$dockerPlatform";
print STDERR "tagging $version docker image for $dockerPlatform...\n";
system("docker tag nix:$version $tag") == 0 or die;
if ($isLatest) {
print STDERR "tagging latest docker image for $dockerPlatform...\n";
system("docker tag nix:$version $latestTag") == 0 or die;
}
print STDERR "pushing $version docker image for $dockerPlatform...\n";
system("docker push -q $tag") == 0 or die;
if ($isLatest) {
print STDERR "pushing latest docker image for $dockerPlatform...\n";
system("docker push -q $latestTag") == 0 or die;
}
$dockerManifest .= " --amend $tag";
$dockerManifestLatest .= " --amend $latestTag"
}
print STDERR "creating multi-platform docker manifest...\n";
system("docker manifest rm nixos/nix:$version");
system("docker manifest create nixos/nix:$version $dockerManifest") == 0 or die;
if ($isLatest) {
print STDERR "creating latest multi-platform docker manifest...\n";
system("docker manifest rm nixos/nix:latest");
system("docker manifest create nixos/nix:latest $dockerManifestLatest") == 0 or die;
}
print STDERR "pushing multi-platform docker manifest...\n";
system("docker manifest push nixos/nix:$version") == 0 or die;
if ($isLatest) {
print STDERR "pushing latest multi-platform docker manifest...\n";
system("docker manifest push nixos/nix:latest") == 0 or die;
}
# Upload release files to S3.
for my $fn (glob "$tmpDir/*") { for my $fn (glob "$tmpDir/*") {
my $name = basename($fn); my $name = basename($fn);
my $dstKey = "$releaseDir/" . $name; my $dstKey = "$releaseDir/" . $name;
@ -143,12 +211,7 @@ if ($isLatest) {
sub getStorePath { sub getStorePath {
my ($jobName) = @_; my ($jobName) = @_;
my $buildInfo = decode_json(fetch("$evalUrl/job/$jobName", 'application/json')); my $buildInfo = decode_json(fetch("$evalUrl/job/$jobName", 'application/json'));
for my $product (values %{$buildInfo->{buildproducts}}) { return $buildInfo->{buildoutputs}->{out}->{path} or die "cannot get store path for '$jobName'";
next unless $product->{type} eq "nix-build";
next if $product->{path} =~ /[a-z]+$/;
return $product->{path};
}
die;
} }
write_file("$nixpkgsDir/nixos/modules/installer/tools/nix-fallback-paths.nix", write_file("$nixpkgsDir/nixos/modules/installer/tools/nix-fallback-paths.nix",

View file

@ -7,13 +7,15 @@ function _complete_nix {
local completion=${line%% *} local completion=${line%% *}
if [[ -z $have_type ]]; then if [[ -z $have_type ]]; then
have_type=1 have_type=1
if [[ $completion = filenames ]]; then if [[ $completion == filenames ]]; then
compopt -o filenames compopt -o filenames
elif [[ $completion == attrs ]]; then
compopt -o nospace
fi fi
else else
COMPREPLY+=("$completion") COMPREPLY+=("$completion")
fi fi
done < <(NIX_GET_COMPLETIONS=$cword "${words[@]}") done < <(NIX_GET_COMPLETIONS=$cword "${words[@]/#\~/$HOME}" 2>/dev/null)
__ltrim_colon_completions "$cur" __ltrim_colon_completions "$cur"
} }

View file

@ -19,7 +19,6 @@ end
function _nix_accepts_files function _nix_accepts_files
set -l response (_nix_complete) set -l response (_nix_complete)
# First line is either filenames or no-filenames.
test $response[1] = 'filenames' test $response[1] = 'filenames'
end end

View file

@ -25,5 +25,10 @@
<string>/var/log/nix-daemon.log</string> <string>/var/log/nix-daemon.log</string>
<key>StandardOutPath</key> <key>StandardOutPath</key>
<string>/dev/null</string> <string>/dev/null</string>
<key>SoftResourceLimits</key>
<dict>
<key>NumberOfFiles</key>
<integer>4096</integer>
</dict>
</dict> </dict>
</plist> </plist>

View file

@ -4,7 +4,7 @@ function _nix() {
local ifs_bk="$IFS" local ifs_bk="$IFS"
local input=("${(Q)words[@]}") local input=("${(Q)words[@]}")
IFS=$'\n' IFS=$'\n'
local res=($(NIX_GET_COMPLETIONS=$((CURRENT - 1)) "$input[@]")) local res=($(NIX_GET_COMPLETIONS=$((CURRENT - 1)) "$input[@]" 2>/dev/null))
IFS="$ifs_bk" IFS="$ifs_bk"
local tpe="${${res[1]}%%> *}" local tpe="${${res[1]}%%> *}"
local -a suggestions local -a suggestions

View file

@ -125,8 +125,8 @@ define build-library
$(1)_PATH := $$(_d)/$$($(1)_NAME).a $(1)_PATH := $$(_d)/$$($(1)_NAME).a
$$($(1)_PATH): $$($(1)_OBJS) | $$(_d)/ $$($(1)_PATH): $$($(1)_OBJS) | $$(_d)/
$(trace-ld) $(LD) -Ur -o $$(_d)/$$($(1)_NAME).o $$? $$(trace-ld) $(LD) -Ur -o $$(_d)/$$($(1)_NAME).o $$?
$(trace-ar) $(AR) crs $$@ $$(_d)/$$($(1)_NAME).o $$(trace-ar) $(AR) crs $$@ $$(_d)/$$($(1)_NAME).o
$(1)_LDFLAGS_USE += $$($(1)_PATH) $$($(1)_LDFLAGS) $(1)_LDFLAGS_USE += $$($(1)_PATH) $$($(1)_LDFLAGS)

View file

@ -14,9 +14,27 @@ if [ -t 1 ]; then
yellow="" yellow=""
normal="" normal=""
fi fi
run_test () {
(cd tests && env ${TESTS_ENVIRONMENT} init.sh 2>/dev/null > /dev/null) (cd tests && env ${TESTS_ENVIRONMENT} init.sh 2>/dev/null > /dev/null)
log="$(cd $(dirname $1) && env ${TESTS_ENVIRONMENT} $(basename $1) 2>&1)" log="$(cd $(dirname $1) && env ${TESTS_ENVIRONMENT} $(basename $1) 2>&1)"
status=$? status=$?
}
run_test "$1"
# Hack: Retry the test if it fails with “unexpected EOF reading a line” as these
# appear randomly without anyone knowing why.
# See https://github.com/NixOS/nix/issues/3605 for more info
if [[ $status -ne 0 && $status -ne 99 && \
"$(uname)" == "Darwin" && \
"$log" =~ "unexpected EOF reading a line" \
]]; then
echo "$post_run_msg [${yellow}FAIL$normal] (possibly flaky, so will be retried)"
echo "$log" | sed 's/^/ /'
run_test "$1"
fi
if [ $status -eq 0 ]; then if [ $status -eq 0 ]; then
echo "$post_run_msg [${green}PASS$normal]" echo "$post_run_msg [${green}PASS$normal]"
elif [ $status -eq 99 ]; then elif [ $status -eq 99 ]; then

399 nix-rust/Cargo.lock generated
View file

@ -1,399 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "assert_matches"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "autocfg"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "bit-set"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bit-vec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "bit-vec"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "bitflags"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "byteorder"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "c2-chacha"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "cfg-if"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "cloudabi"
version = "0.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "fnv"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "fuchsia-cprng"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "getrandom"
version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
"wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "hex"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "libc"
version = "0.2.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "nix-rust"
version = "0.1.0"
dependencies = [
"assert_matches 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
"proptest 0.9.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "num-traits"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "ppv-lite86"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "proptest"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bit-set 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
"quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)",
"rusty-fork 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "quick-error"
version = "1.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rand"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"getrandom 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand_chacha"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand_chacha"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand_core"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand_core"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rand_core"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"getrandom 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand_hc"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand_hc"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand_isaac"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand_jitter"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand_os"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
"fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand_pcg"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand_xorshift"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rdrand"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "redox_syscall"
version = "0.1.56"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "regex-syntax"
version = "0.6.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "remove_dir_all"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rusty-fork"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
"quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"wait-timeout 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "tempfile"
version = "3.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
"redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)",
"remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "wait-timeout"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "wasi"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "winapi"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[metadata]
"checksum assert_matches 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7deb0a829ca7bcfaf5da70b073a8d128619259a7be8216a355e23f00763059e5"
"checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2"
"checksum bit-set 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e84c238982c4b1e1ee668d136c510c67a13465279c0cb367ea6baf6310620a80"
"checksum bit-vec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f59bbe95d4e52a6398ec21238d31577f2b28a9d86807f06ca59d191d8440d0bb"
"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
"checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5"
"checksum c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb"
"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3"
"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
"checksum getrandom 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "e7db7ca94ed4cd01190ceee0d8a8052f08a247aa1b469a7f68c6a3b71afcf407"
"checksum hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "805026a5d0141ffc30abb3be3173848ad46a1b1664fe632428479619a3644d77"
"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
"checksum libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)" = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558"
"checksum num-traits 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c81ffc11c212fa327657cb19dd85eb7419e163b5b076bede2bdb5c974c07e4"
"checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b"
"checksum proptest 0.9.4 (registry+https://github.com/rust-lang/crates.io-index)" = "cf147e022eacf0c8a054ab864914a7602618adba841d800a9a9868a5237a529f"
"checksum quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9274b940887ce9addde99c4eee6b5c44cc494b182b97e73dc8ffdcb3397fd3f0"
"checksum rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca"
"checksum rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3ae1b169243eaf61759b8475a998f0a385e42042370f3a7dbaf35246eacc8412"
"checksum rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef"
"checksum rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853"
"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
"checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
"checksum rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4"
"checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
"checksum rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08"
"checksum rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b"
"checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071"
"checksum rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44"
"checksum rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c"
"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"
"checksum regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "11a7e20d1cce64ef2fed88b66d347f88bd9babb82845b2b858f3edbf59a4f716"
"checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e"
"checksum rusty-fork 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3dd93264e10c577503e926bd1430193eeb5d21b059148910082245309b424fae"
"checksum tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9"
"checksum wait-timeout 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6"
"checksum wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b89c3ce4ce14bdc6fb6beaf9ec7928ca331de5df7e5ea278375642a2f478570d"
"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

View file

@ -1,23 +0,0 @@
[package]
name = "nix-rust"
version = "0.1.0"
authors = ["Eelco Dolstra <edolstra@gmail.com>"]
edition = "2018"
[lib]
name = "nixrust"
crate-type = ["cdylib"]
[dependencies]
libc = "0.2"
#futures-preview = { version = "=0.3.0-alpha.19" }
#hyper = "0.13.0-alpha.4"
#http = "0.1"
#tokio = { version = "0.2.0-alpha.6", default-features = false, features = ["rt-full"] }
lazy_static = "1.4"
#byteorder = "1.3"
[dev-dependencies]
hex = "0.3"
assert_matches = "1.3"
proptest = "0.9"

View file

@ -1,48 +0,0 @@
ifeq ($(OPTIMIZE), 1)
RUST_MODE = --release
RUST_DIR = release
else
RUST_MODE =
RUST_DIR = debug
endif
libnixrust_PATH := $(d)/target/$(RUST_DIR)/libnixrust.$(SO_EXT)
libnixrust_INSTALL_PATH := $(libdir)/libnixrust.$(SO_EXT)
libnixrust_LDFLAGS_USE := -L$(d)/target/$(RUST_DIR) -lnixrust
libnixrust_LDFLAGS_USE_INSTALLED := -L$(libdir) -lnixrust
ifdef HOST_LINUX
libnixrust_LDFLAGS_USE += -ldl
libnixrust_LDFLAGS_USE_INSTALLED += -ldl
endif
ifdef HOST_DARWIN
libnixrust_BUILD_FLAGS = NIX_LDFLAGS="-undefined dynamic_lookup"
else
libnixrust_LDFLAGS_USE += -Wl,-rpath,$(abspath $(d)/target/$(RUST_DIR))
libnixrust_LDFLAGS_USE_INSTALLED += -Wl,-rpath,$(libdir)
endif
$(libnixrust_PATH): $(call rwildcard, $(d)/src, *.rs) $(d)/Cargo.toml
$(trace-gen) cd nix-rust && CARGO_HOME=$$(if [[ -d vendor ]]; then echo vendor; fi) \
$(libnixrust_BUILD_FLAGS) \
cargo build $(RUST_MODE) $$(if [[ -d vendor ]]; then echo --offline; fi) \
&& touch target/$(RUST_DIR)/libnixrust.$(SO_EXT)
$(libnixrust_INSTALL_PATH): $(libnixrust_PATH)
$(target-gen) cp $^ $@
ifdef HOST_DARWIN
install_name_tool -id $@ $@
endif
clean: clean-rust
clean-rust:
$(suppress) rm -rfv nix-rust/target
ifndef HOST_DARWIN
check: rust-tests
rust-tests:
$(trace-test) cd nix-rust && CARGO_HOME=$$(if [[ -d vendor ]]; then echo vendor; fi) cargo test --release $$(if [[ -d vendor ]]; then echo --offline; fi)
endif

View file

@ -1,77 +0,0 @@
use super::{error, store::path, store::StorePath, util};
#[no_mangle]
pub unsafe extern "C" fn ffi_String_new(s: &str, out: *mut String) {
// FIXME: check whether 's' is valid UTF-8?
out.write(s.to_string())
}
#[no_mangle]
pub unsafe extern "C" fn ffi_String_drop(self_: *mut String) {
std::ptr::drop_in_place(self_);
}
#[no_mangle]
pub extern "C" fn ffi_StorePath_new(
path: &str,
store_dir: &str,
) -> Result<StorePath, error::CppException> {
StorePath::new(std::path::Path::new(path), std::path::Path::new(store_dir))
.map_err(|err| err.into())
}
#[no_mangle]
pub extern "C" fn ffi_StorePath_new2(
hash: &[u8; crate::store::path::STORE_PATH_HASH_BYTES],
name: &str,
) -> Result<StorePath, error::CppException> {
StorePath::from_parts(*hash, name).map_err(|err| err.into())
}
#[no_mangle]
pub extern "C" fn ffi_StorePath_fromBaseName(
base_name: &str,
) -> Result<StorePath, error::CppException> {
StorePath::new_from_base_name(base_name).map_err(|err| err.into())
}
#[no_mangle]
pub unsafe extern "C" fn ffi_StorePath_drop(self_: *mut StorePath) {
std::ptr::drop_in_place(self_);
}
#[no_mangle]
pub extern "C" fn ffi_StorePath_to_string(self_: &StorePath) -> Vec<u8> {
let mut buf = vec![0; path::STORE_PATH_HASH_CHARS + 1 + self_.name.name().len()];
util::base32::encode_into(self_.hash.hash(), &mut buf[0..path::STORE_PATH_HASH_CHARS]);
buf[path::STORE_PATH_HASH_CHARS] = b'-';
buf[path::STORE_PATH_HASH_CHARS + 1..].clone_from_slice(self_.name.name().as_bytes());
buf
}
#[no_mangle]
pub extern "C" fn ffi_StorePath_less_than(a: &StorePath, b: &StorePath) -> bool {
a < b
}
#[no_mangle]
pub extern "C" fn ffi_StorePath_eq(a: &StorePath, b: &StorePath) -> bool {
a == b
}
#[no_mangle]
pub extern "C" fn ffi_StorePath_clone(self_: &StorePath) -> StorePath {
self_.clone()
}
#[no_mangle]
pub extern "C" fn ffi_StorePath_name(self_: &StorePath) -> &str {
self_.name.name()
}
#[no_mangle]
pub extern "C" fn ffi_StorePath_hash_data(
self_: &StorePath,
) -> &[u8; crate::store::path::STORE_PATH_HASH_BYTES] {
self_.hash.hash()
}

View file

@ -1,118 +0,0 @@
use std::fmt;
#[derive(Debug)]
pub enum Error {
InvalidPath(crate::store::StorePath),
BadStorePath(std::path::PathBuf),
NotInStore(std::path::PathBuf),
BadNarInfo,
BadBase32,
StorePathNameEmpty,
StorePathNameTooLong,
BadStorePathName,
NarSizeFieldTooBig,
BadNarString,
BadNarPadding,
BadNarVersionMagic,
MissingNarOpenTag,
MissingNarCloseTag,
MissingNarField,
BadNarField(String),
BadExecutableField,
IOError(std::io::Error),
#[cfg(unused)]
HttpError(hyper::error::Error),
Misc(String),
#[cfg(not(test))]
Foreign(CppException),
BadTarFileMemberName(String),
}
impl From<std::io::Error> for Error {
fn from(err: std::io::Error) -> Self {
Error::IOError(err)
}
}
#[cfg(unused)]
impl From<hyper::error::Error> for Error {
fn from(err: hyper::error::Error) -> Self {
Error::HttpError(err)
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Error::InvalidPath(_) => write!(f, "invalid path"),
Error::BadNarInfo => write!(f, ".narinfo file is corrupt"),
Error::BadStorePath(path) => write!(f, "path '{}' is not a store path", path.display()),
Error::NotInStore(path) => {
write!(f, "path '{}' is not in the Nix store", path.display())
}
Error::BadBase32 => write!(f, "invalid base32 string"),
Error::StorePathNameEmpty => write!(f, "store path name is empty"),
Error::StorePathNameTooLong => {
write!(f, "store path name is longer than 211 characters")
}
Error::BadStorePathName => write!(f, "store path name contains forbidden character"),
Error::NarSizeFieldTooBig => write!(f, "size field in NAR is too big"),
Error::BadNarString => write!(f, "NAR string is not valid UTF-8"),
Error::BadNarPadding => write!(f, "NAR padding is not zero"),
Error::BadNarVersionMagic => write!(f, "unsupported NAR version"),
Error::MissingNarOpenTag => write!(f, "NAR open tag is missing"),
Error::MissingNarCloseTag => write!(f, "NAR close tag is missing"),
Error::MissingNarField => write!(f, "expected NAR field is missing"),
Error::BadNarField(s) => write!(f, "unrecognized NAR field '{}'", s),
Error::BadExecutableField => write!(f, "bad 'executable' field in NAR"),
Error::IOError(err) => write!(f, "I/O error: {}", err),
#[cfg(unused)]
Error::HttpError(err) => write!(f, "HTTP error: {}", err),
#[cfg(not(test))]
Error::Foreign(_) => write!(f, "<C++ exception>"), // FIXME
Error::Misc(s) => write!(f, "{}", s),
Error::BadTarFileMemberName(s) => {
write!(f, "tar archive contains illegal file name '{}'", s)
}
}
}
}
#[cfg(not(test))]
impl From<Error> for CppException {
fn from(err: Error) -> Self {
match err {
Error::Foreign(ex) => ex,
_ => CppException::new(&err.to_string()),
}
}
}
#[cfg(not(test))]
#[repr(C)]
#[derive(Debug)]
pub struct CppException(*const libc::c_void); // == std::exception_ptr*
#[cfg(not(test))]
impl CppException {
fn new(s: &str) -> Self {
Self(unsafe { make_error(s) })
}
}
#[cfg(not(test))]
impl Drop for CppException {
fn drop(&mut self) {
unsafe {
destroy_error(self.0);
}
}
}
#[cfg(not(test))]
extern "C" {
#[allow(improper_ctypes)] // YOLO
fn make_error(s: &str) -> *const libc::c_void;
fn destroy_error(exc: *const libc::c_void);
}
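
For illustration, a minimal sketch of how this error type composes with `?` (assuming the `Error` type above is in scope; the file path below is a placeholder):
fn read_len(p: &std::path::Path) -> Result<u64, Error> {
    // std::io::Error converts into Error::IOError via the From impl above
    Ok(std::fs::metadata(p)?.len())
}
fn main() {
    match read_len(std::path::Path::new("/no/such/file")) {
        Ok(n) => println!("{} bytes", n),
        // Display renders this variant as "I/O error: ..."
        Err(e) => eprintln!("{}", e),
    }
}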

View file

@ -1,10 +0,0 @@
#[allow(improper_ctypes_definitions)]
#[cfg(not(test))]
mod c;
mod error;
#[cfg(unused)]
mod nar;
mod store;
mod util;
pub use error::Error;

View file

@ -1,126 +0,0 @@
use crate::Error;
use byteorder::{LittleEndian, ReadBytesExt};
use std::convert::TryFrom;
use std::io::Read;
pub fn parse<R: Read>(input: &mut R) -> Result<(), Error> {
if String::read(input)? != NAR_VERSION_MAGIC {
return Err(Error::BadNarVersionMagic);
}
parse_file(input)
}
const NAR_VERSION_MAGIC: &str = "nix-archive-1";
fn parse_file<R: Read>(input: &mut R) -> Result<(), Error> {
if String::read(input)? != "(" {
return Err(Error::MissingNarOpenTag);
}
if String::read(input)? != "type" {
return Err(Error::MissingNarField);
}
match String::read(input)?.as_ref() {
"regular" => {
let mut _executable = false;
let mut tag = String::read(input)?;
if tag == "executable" {
_executable = true;
if String::read(input)? != "" {
return Err(Error::BadExecutableField);
}
tag = String::read(input)?;
}
if tag != "contents" {
return Err(Error::MissingNarField);
}
let _contents = Vec::<u8>::read(input)?;
if String::read(input)? != ")" {
return Err(Error::MissingNarCloseTag);
}
}
"directory" => loop {
match String::read(input)?.as_ref() {
"entry" => {
if String::read(input)? != "(" {
return Err(Error::MissingNarOpenTag);
}
if String::read(input)? != "name" {
return Err(Error::MissingNarField);
}
let _name = String::read(input)?;
if String::read(input)? != "node" {
return Err(Error::MissingNarField);
}
parse_file(input)?;
let tag = String::read(input)?;
if tag != ")" {
return Err(Error::MissingNarCloseTag);
}
}
")" => break,
s => return Err(Error::BadNarField(s.into())),
}
},
"symlink" => {
if String::read(input)? != "target" {
return Err(Error::MissingNarField);
}
let _target = String::read(input)?;
if String::read(input)? != ")" {
return Err(Error::MissingNarCloseTag);
}
}
s => return Err(Error::BadNarField(s.into())),
}
Ok(())
}
trait Deserialize: Sized {
fn read<R: Read>(input: &mut R) -> Result<Self, Error>;
}
impl Deserialize for String {
fn read<R: Read>(input: &mut R) -> Result<Self, Error> {
let buf = Deserialize::read(input)?;
Ok(String::from_utf8(buf).map_err(|_| Error::BadNarString)?)
}
}
impl Deserialize for Vec<u8> {
fn read<R: Read>(input: &mut R) -> Result<Self, Error> {
let n: usize = Deserialize::read(input)?;
let mut buf = vec![0; n];
input.read_exact(&mut buf)?;
skip_padding(input, n)?;
Ok(buf)
}
}
fn skip_padding<R: Read>(input: &mut R, len: usize) -> Result<(), Error> {
if len % 8 != 0 {
let mut buf = [0; 8];
let buf = &mut buf[0..8 - (len % 8)];
input.read_exact(buf)?;
if !buf.iter().all(|b| *b == 0) {
return Err(Error::BadNarPadding);
}
}
Ok(())
}
impl Deserialize for u64 {
fn read<R: Read>(input: &mut R) -> Result<Self, Error> {
Ok(input.read_u64::<LittleEndian>()?)
}
}
impl Deserialize for usize {
fn read<R: Read>(input: &mut R) -> Result<Self, Error> {
let n: u64 = Deserialize::read(input)?;
Ok(usize::try_from(n).map_err(|_| Error::NarSizeFieldTooBig)?)
}
}
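
A minimal usage sketch for the parser above (assuming `parse` and `Error` from this crate are in scope; the file name is a placeholder):
use std::fs::File;
use std::io::BufReader;
fn main() -> Result<(), Error> {
    // validate the structure of a NAR archive; file contents are discarded
    let file = File::open("example.nar")?;
    parse(&mut BufReader::new(file))
}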

View file

@ -1,48 +0,0 @@
use super::{PathInfo, Store, StorePath};
use crate::Error;
use hyper::client::Client;
pub struct BinaryCacheStore {
base_uri: String,
client: Client<hyper::client::HttpConnector, hyper::Body>,
}
impl BinaryCacheStore {
pub fn new(base_uri: String) -> Self {
Self {
base_uri,
client: Client::new(),
}
}
}
impl Store for BinaryCacheStore {
fn query_path_info(
&self,
path: &StorePath,
) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<PathInfo, Error>> + Send>> {
let uri = format!("{}/{}.narinfo", self.base_uri.clone(), path.hash);
let path = path.clone();
let client = self.client.clone();
let store_dir = self.store_dir().to_string();
Box::pin(async move {
let response = client.get(uri.parse::<hyper::Uri>().unwrap()).await?;
if response.status() == hyper::StatusCode::NOT_FOUND
|| response.status() == hyper::StatusCode::FORBIDDEN
{
return Err(Error::InvalidPath(path));
}
let mut body = response.into_body();
let mut bytes = Vec::new();
while let Some(next) = body.next().await {
bytes.extend(next?);
}
PathInfo::parse_nar_info(std::str::from_utf8(&bytes).unwrap(), &store_dir)
})
}
}

View file

@ -1,17 +0,0 @@
pub mod path;
#[cfg(unused)]
mod binary_cache_store;
#[cfg(unused)]
mod path_info;
#[cfg(unused)]
mod store;
pub use path::{StorePath, StorePathHash, StorePathName};
#[cfg(unused)]
pub use binary_cache_store::BinaryCacheStore;
#[cfg(unused)]
pub use path_info::PathInfo;
#[cfg(unused)]
pub use store::Store;

View file

@ -1,224 +0,0 @@
use crate::error::Error;
use crate::util::base32;
use std::fmt;
use std::path::Path;
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct StorePath {
pub hash: StorePathHash,
pub name: StorePathName,
}
pub const STORE_PATH_HASH_BYTES: usize = 20;
pub const STORE_PATH_HASH_CHARS: usize = 32;
impl StorePath {
pub fn new(path: &Path, store_dir: &Path) -> Result<Self, Error> {
if path.parent() != Some(store_dir) {
return Err(Error::NotInStore(path.into()));
}
Self::new_from_base_name(
path.file_name()
.ok_or_else(|| Error::BadStorePath(path.into()))?
.to_str()
.ok_or_else(|| Error::BadStorePath(path.into()))?,
)
}
pub fn from_parts(hash: [u8; STORE_PATH_HASH_BYTES], name: &str) -> Result<Self, Error> {
Ok(StorePath {
hash: StorePathHash(hash),
name: StorePathName::new(name)?,
})
}
pub fn new_from_base_name(base_name: &str) -> Result<Self, Error> {
if base_name.len() < STORE_PATH_HASH_CHARS + 1
|| base_name.as_bytes()[STORE_PATH_HASH_CHARS] != b'-'
{
return Err(Error::BadStorePath(base_name.into()));
}
Ok(StorePath {
hash: StorePathHash::new(&base_name[0..STORE_PATH_HASH_CHARS])?,
name: StorePathName::new(&base_name[STORE_PATH_HASH_CHARS + 1..])?,
})
}
}
impl fmt::Display for StorePath {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}-{}", self.hash, self.name)
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct StorePathHash([u8; STORE_PATH_HASH_BYTES]);
impl StorePathHash {
pub fn new(s: &str) -> Result<Self, Error> {
assert_eq!(s.len(), STORE_PATH_HASH_CHARS);
let v = base32::decode(s)?;
assert_eq!(v.len(), STORE_PATH_HASH_BYTES);
let mut bytes: [u8; 20] = Default::default();
bytes.copy_from_slice(&v[0..STORE_PATH_HASH_BYTES]);
Ok(Self(bytes))
}
pub fn hash(&self) -> &[u8; STORE_PATH_HASH_BYTES] {
&self.0
}
}
impl fmt::Display for StorePathHash {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut buf = vec![0; STORE_PATH_HASH_CHARS];
base32::encode_into(&self.0, &mut buf);
f.write_str(std::str::from_utf8(&buf).unwrap())
}
}
impl Ord for StorePathHash {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
// Historically we've sorted store paths by their base32
// serialization, but our base32 encodes bytes in reverse
// order. So compare them in reverse order as well.
self.0.iter().rev().cmp(other.0.iter().rev())
}
}
impl PartialOrd for StorePathHash {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct StorePathName(String);
impl StorePathName {
pub fn new(s: &str) -> Result<Self, Error> {
if s.is_empty() {
return Err(Error::StorePathNameEmpty);
}
if s.len() > 211 {
return Err(Error::StorePathNameTooLong);
}
let is_good_path_name = s.chars().all(|c| {
c.is_ascii_alphabetic()
|| c.is_ascii_digit()
|| c == '+'
|| c == '-'
|| c == '.'
|| c == '_'
|| c == '?'
|| c == '='
});
if s.starts_with('.') || !is_good_path_name {
return Err(Error::BadStorePathName);
}
Ok(Self(s.to_string()))
}
pub fn name(&self) -> &str {
&self.0
}
}
impl fmt::Display for StorePathName {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(&self.0)
}
}
#[cfg(test)]
mod tests {
use super::*;
use assert_matches::assert_matches;
#[test]
fn test_parse() {
let s = "7h7qgvs4kgzsn8a6rb273saxyqh4jxlz-konsole-18.12.3";
let p = StorePath::new_from_base_name(&s).unwrap();
assert_eq!(p.name.0, "konsole-18.12.3");
assert_eq!(
p.hash.0,
[
0x9f, 0x76, 0x49, 0x20, 0xf6, 0x5d, 0xe9, 0x71, 0xc4, 0xca, 0x46, 0x21, 0xab, 0xff,
0x9b, 0x44, 0xef, 0x87, 0x0f, 0x3c
]
);
}
#[test]
fn test_no_name() {
let s = "7h7qgvs4kgzsn8a6rb273saxyqh4jxlz-";
assert_matches!(
StorePath::new_from_base_name(&s),
Err(Error::StorePathNameEmpty)
);
}
#[test]
fn test_no_dash() {
let s = "7h7qgvs4kgzsn8a6rb273saxyqh4jxlz";
assert_matches!(
StorePath::new_from_base_name(&s),
Err(Error::BadStorePath(_))
);
}
#[test]
fn test_short_hash() {
let s = "7h7qgvs4kgzsn8a6rb273saxyqh4jxl-konsole-18.12.3";
assert_matches!(
StorePath::new_from_base_name(&s),
Err(Error::BadStorePath(_))
);
}
#[test]
fn test_invalid_hash() {
let s = "7h7qgvs4kgzsn8e6rb273saxyqh4jxlz-konsole-18.12.3";
assert_matches!(StorePath::new_from_base_name(&s), Err(Error::BadBase32));
}
#[test]
fn test_long_name() {
let s = "7h7qgvs4kgzsn8a6rb273saxyqh4jxlz-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
assert_matches!(StorePath::new_from_base_name(&s), Ok(_));
}
#[test]
fn test_too_long_name() {
let s = "7h7qgvs4kgzsn8a6rb273saxyqh4jxlz-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
assert_matches!(
StorePath::new_from_base_name(&s),
Err(Error::StorePathNameTooLong)
);
}
#[test]
fn test_bad_name() {
let s = "7h7qgvs4kgzsn8a6rb273saxyqh4jxlz-foo bar";
assert_matches!(
StorePath::new_from_base_name(&s),
Err(Error::BadStorePathName)
);
let s = "7h7qgvs4kgzsn8a6rb273saxyqh4jxlz-kónsole";
assert_matches!(
StorePath::new_from_base_name(&s),
Err(Error::BadStorePathName)
);
}
#[test]
fn test_roundtrip() {
let s = "7h7qgvs4kgzsn8a6rb273saxyqh4jxlz-konsole-18.12.3";
assert_eq!(StorePath::new_from_base_name(&s).unwrap().to_string(), s);
}
}
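
A minimal sketch of constructing a store path from raw parts (assuming the types above are in scope; the hash and name are made up for illustration):
fn main() -> Result<(), Error> {
    // an all-zero hash encodes to 32 '0' characters in this base32 alphabet
    let hash = [0u8; STORE_PATH_HASH_BYTES];
    let path = StorePath::from_parts(hash, "example-1.0")?;
    // prints the base32 hash, a '-', then the name
    println!("{}", path);
    Ok(())
}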

View file

@ -1,70 +0,0 @@
use crate::store::StorePath;
use crate::Error;
use std::collections::BTreeSet;
#[derive(Clone, Debug)]
pub struct PathInfo {
pub path: StorePath,
pub references: BTreeSet<StorePath>,
pub nar_size: u64,
pub deriver: Option<StorePath>,
// Additional binary cache info.
pub url: Option<String>,
pub compression: Option<String>,
pub file_size: Option<u64>,
}
impl PathInfo {
pub fn parse_nar_info(nar_info: &str, store_dir: &str) -> Result<Self, Error> {
let mut path = None;
let mut references = BTreeSet::new();
let mut nar_size = None;
let mut deriver = None;
let mut url = None;
let mut compression = None;
let mut file_size = None;
for line in nar_info.lines() {
let colon = line.find(':').ok_or(Error::BadNarInfo)?;
let (name, value) = line.split_at(colon);
if !value.starts_with(": ") {
return Err(Error::BadNarInfo);
}
let value = &value[2..];
if name == "StorePath" {
path = Some(StorePath::new(std::path::Path::new(value), store_dir)?);
} else if name == "NarSize" {
nar_size = Some(u64::from_str_radix(value, 10).map_err(|_| Error::BadNarInfo)?);
} else if name == "References" {
if !value.is_empty() {
for r in value.split(' ') {
references.insert(StorePath::new_from_base_name(r)?);
}
}
} else if name == "Deriver" {
deriver = Some(StorePath::new_from_base_name(value)?);
} else if name == "URL" {
url = Some(value.into());
} else if name == "Compression" {
compression = Some(value.into());
} else if name == "FileSize" {
file_size = Some(u64::from_str_radix(value, 10).map_err(|_| Error::BadNarInfo)?);
}
}
Ok(PathInfo {
path: path.ok_or(Error::BadNarInfo)?,
references,
nar_size: nar_size.ok_or(Error::BadNarInfo)?,
deriver,
url: Some(url.ok_or(Error::BadNarInfo)?),
compression,
file_size,
})
}
}

View file

@ -1,53 +0,0 @@
use super::{PathInfo, StorePath};
use crate::Error;
use std::collections::{BTreeMap, BTreeSet};
use std::path::Path;
pub trait Store: Send + Sync {
fn store_dir(&self) -> &str {
"/nix/store"
}
fn query_path_info(
&self,
store_path: &StorePath,
) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<PathInfo, Error>> + Send>>;
}
impl dyn Store {
pub fn parse_store_path(&self, path: &Path) -> Result<StorePath, Error> {
StorePath::new(path, self.store_dir())
}
pub async fn compute_path_closure(
&self,
roots: BTreeSet<StorePath>,
) -> Result<BTreeMap<StorePath, PathInfo>, Error> {
let mut done = BTreeSet::new();
let mut result = BTreeMap::new();
let mut pending = vec![];
for root in roots {
pending.push(self.query_path_info(&root));
done.insert(root);
}
while !pending.is_empty() {
let (info, _, remaining) = futures::future::select_all(pending).await;
pending = remaining;
let info = info?;
for path in &info.references {
if !done.contains(path) {
pending.push(self.query_path_info(&path));
done.insert(path.clone());
}
}
result.insert(info.path.clone(), info);
}
Ok(result)
}
}

View file

@ -1,160 +0,0 @@
use crate::error::Error;
use lazy_static::lazy_static;
pub fn encoded_len(input_len: usize) -> usize {
if input_len == 0 {
0
} else {
(input_len * 8 - 1) / 5 + 1
}
}
pub fn decoded_len(input_len: usize) -> usize {
input_len * 5 / 8
}
static BASE32_CHARS: &[u8; 32] = &b"0123456789abcdfghijklmnpqrsvwxyz";
lazy_static! {
static ref BASE32_CHARS_REVERSE: Box<[u8; 256]> = {
let mut xs = [0xffu8; 256];
for (n, c) in BASE32_CHARS.iter().enumerate() {
xs[*c as usize] = n as u8;
}
Box::new(xs)
};
}
pub fn encode(input: &[u8]) -> String {
let mut buf = vec![0; encoded_len(input.len())];
encode_into(input, &mut buf);
std::str::from_utf8(&buf).unwrap().to_string()
}
pub fn encode_into(input: &[u8], output: &mut [u8]) {
let len = encoded_len(input.len());
assert_eq!(len, output.len());
let mut nr_bits_left: usize = 0;
let mut bits_left: u16 = 0;
let mut pos = len;
for b in input {
bits_left |= (*b as u16) << nr_bits_left;
nr_bits_left += 8;
while nr_bits_left > 5 {
output[pos - 1] = BASE32_CHARS[(bits_left & 0x1f) as usize];
pos -= 1;
bits_left >>= 5;
nr_bits_left -= 5;
}
}
if nr_bits_left > 0 {
output[pos - 1] = BASE32_CHARS[(bits_left & 0x1f) as usize];
pos -= 1;
}
assert_eq!(pos, 0);
}
pub fn decode(input: &str) -> Result<Vec<u8>, crate::Error> {
let mut res = Vec::with_capacity(decoded_len(input.len()));
let mut nr_bits_left: usize = 0;
let mut bits_left: u16 = 0;
for c in input.chars().rev() {
let b = BASE32_CHARS_REVERSE[c as usize];
if b == 0xff {
return Err(Error::BadBase32);
}
bits_left |= (b as u16) << nr_bits_left;
nr_bits_left += 5;
if nr_bits_left >= 8 {
res.push((bits_left & 0xff) as u8);
bits_left >>= 8;
nr_bits_left -= 8;
}
}
if nr_bits_left > 0 && bits_left != 0 {
return Err(Error::BadBase32);
}
Ok(res)
}
#[cfg(test)]
mod tests {
use super::*;
use assert_matches::assert_matches;
use hex;
use proptest::proptest;
#[test]
fn test_encode() {
assert_eq!(encode(&[]), "");
assert_eq!(
encode(&hex::decode("0839703786356bca59b0f4a32987eb2e6de43ae8").unwrap()),
"x0xf8v9fxf3jk8zln1cwlsrmhqvp0f88"
);
assert_eq!(
encode(
&hex::decode("ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad")
.unwrap()
),
"1b8m03r63zqhnjf7l5wnldhh7c134ap5vpj0850ymkq1iyzicy5s"
);
assert_eq!(
encode(
&hex::decode("ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f")
.unwrap()
),
"2gs8k559z4rlahfx0y688s49m2vvszylcikrfinm30ly9rak69236nkam5ydvly1ai7xac99vxfc4ii84hawjbk876blyk1jfhkbbyx"
);
}
#[test]
fn test_decode() {
assert_eq!(hex::encode(decode("").unwrap()), "");
assert_eq!(
hex::encode(decode("x0xf8v9fxf3jk8zln1cwlsrmhqvp0f88").unwrap()),
"0839703786356bca59b0f4a32987eb2e6de43ae8"
);
assert_eq!(
hex::encode(decode("1b8m03r63zqhnjf7l5wnldhh7c134ap5vpj0850ymkq1iyzicy5s").unwrap()),
"ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"
);
assert_eq!(
hex::encode(decode("2gs8k559z4rlahfx0y688s49m2vvszylcikrfinm30ly9rak69236nkam5ydvly1ai7xac99vxfc4ii84hawjbk876blyk1jfhkbbyx").unwrap()),
"ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f"
);
assert_matches!(
decode("xoxf8v9fxf3jk8zln1cwlsrmhqvp0f88"),
Err(Error::BadBase32)
);
assert_matches!(
decode("2b8m03r63zqhnjf7l5wnldhh7c134ap5vpj0850ymkq1iyzicy5s"),
Err(Error::BadBase32)
);
assert_matches!(decode("2"), Err(Error::BadBase32));
assert_matches!(decode("2gs"), Err(Error::BadBase32));
assert_matches!(decode("2gs8"), Err(Error::BadBase32));
}
proptest! {
#[test]
fn roundtrip(s: Vec<u8>) {
assert_eq!(s, decode(&encode(&s)).unwrap());
}
}
}
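
A minimal round-trip sketch for the codec above, reusing a vector from the tests (assumes `encode`/`decode` and the `hex` dev-dependency are available):
fn main() {
    let bytes = hex::decode("0839703786356bca59b0f4a32987eb2e6de43ae8").unwrap();
    let s = encode(&bytes);
    // the last input byte determines the first output characters
    assert_eq!(s, "x0xf8v9fxf3jk8zln1cwlsrmhqvp0f88");
    assert_eq!(decode(&s).unwrap(), bytes);
    // characters outside the 32-letter alphabet are rejected
    assert!(decode("not base32!").is_err());
}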

View file

@ -1 +0,0 @@
pub mod base32;

View file

@ -41,7 +41,7 @@ perlarchname=$($perl -e 'use Config; print $Config{archname};')
AC_SUBST(perllibdir, [${libdir}/perl5/site_perl/$perlversion/$perlarchname]) AC_SUBST(perllibdir, [${libdir}/perl5/site_perl/$perlversion/$perlarchname])
AC_MSG_RESULT($perllibdir) AC_MSG_RESULT($perllibdir)
# Look for libsodium, an optional dependency. # Look for libsodium.
PKG_CHECK_MODULES([SODIUM], [libsodium], [CXXFLAGS="$SODIUM_CFLAGS $CXXFLAGS"]) PKG_CHECK_MODULES([SODIUM], [libsodium], [CXXFLAGS="$SODIUM_CFLAGS $CXXFLAGS"])
# Check for the required Perl dependencies (DBI and DBD::SQLite). # Check for the required Perl dependencies (DBI and DBD::SQLite).

View file

@ -1,6 +1,7 @@
package Nix::Config; package Nix::Config;
use MIME::Base64; use MIME::Base64;
use Nix::Store;
$version = "@PACKAGE_VERSION@"; $version = "@PACKAGE_VERSION@";

View file

@ -240,7 +240,7 @@ SV * convertHash(char * algo, char * s, int toBase32)
PPCODE: PPCODE:
try { try {
auto h = Hash::parseAny(s, parseHashType(algo)); auto h = Hash::parseAny(s, parseHashType(algo));
string s = h.to_string(toBase32 ? Base32 : Base16, false); auto s = h.to_string(toBase32 ? Base32 : Base16, false);
XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0))); XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0)));
} catch (Error & e) { } catch (Error & e) {
croak("%s", e.what()); croak("%s", e.what());

View file

@ -0,0 +1,33 @@
#!/usr/bin/env bash
set -euo pipefail
# set -x
# mapfile BUILDS_FOR_LATEST_EVAL < <(
# curl -H 'Accept: application/json' https://hydra.nixos.org/jobset/nix/master/evals | \
# jq -r '.evals[0].builds[] | @sh')
BUILDS_FOR_LATEST_EVAL=$(
curl -sS -H 'Accept: application/json' https://hydra.nixos.org/jobset/nix/master/evals | \
jq -r '.evals[0].builds[]')
someBuildFailed=0
for buildId in $BUILDS_FOR_LATEST_EVAL; do
buildInfo=$(curl -sS -H 'Accept: application/json' "https://hydra.nixos.org/build/$buildId")
finished=$(echo "$buildInfo" | jq -r '.finished')
if [[ $finished = 0 ]]; then
continue
fi
buildStatus=$(echo "$buildInfo" | jq -r '.buildstatus')
if [[ $buildStatus != 0 ]]; then
someBuildFailed=1
echo "Job “$(echo "$buildInfo" | jq -r '.job')” failed on hydra: $buildInfo"
fi
done
exit "$someBuildFailed"

View file

@ -246,6 +246,7 @@ get_volume_pass() {
verify_volume_pass() { verify_volume_pass() {
local volume_special="$1" # (i.e., disk1s7) local volume_special="$1" # (i.e., disk1s7)
local volume_uuid="$2" local volume_uuid="$2"
_sudo "to confirm the password actually unlocks the volume" \
/usr/sbin/diskutil apfs unlockVolume "$volume_special" -verify -stdinpassphrase -user "$volume_uuid" /usr/sbin/diskutil apfs unlockVolume "$volume_special" -verify -stdinpassphrase -user "$volume_uuid"
} }
@ -440,7 +441,22 @@ add_nix_vol_fstab_line() {
# shellcheck disable=SC1003,SC2026 # shellcheck disable=SC1003,SC2026
local escaped_mountpoint="${NIX_ROOT/ /'\\\'040}" local escaped_mountpoint="${NIX_ROOT/ /'\\\'040}"
shift shift
EDITOR="/usr/bin/ex" _sudo "to add nix to fstab" "$@" <<EOF
# wrap `ex` to work around a problem with vim plugins breaking exit codes;
# (see https://github.com/NixOS/nix/issues/5468)
# we'd prefer EDITOR="/usr/bin/ex --noplugin" but vifs doesn't word-split
# the EDITOR env.
#
# TODO: at some point we should switch to `--clean`, but it wasn't added
# until https://github.com/vim/vim/releases/tag/v8.0.1554, while the macOS
# minver 10.12.6 seems to have shipped with vim 7.4
cat > "$SCRATCH/ex_cleanroom_wrapper" <<EOF
#!/bin/sh
/usr/bin/ex --noplugin "\$@"
EOF
chmod 755 "$SCRATCH/ex_cleanroom_wrapper"
EDITOR="$SCRATCH/ex_cleanroom_wrapper" _sudo "to add nix to fstab" "$@" <<EOF
:a :a
UUID=$uuid $escaped_mountpoint apfs rw,noauto,nobrowse,suid,owners UUID=$uuid $escaped_mountpoint apfs rw,noauto,nobrowse,suid,owners
. .
@ -631,7 +647,7 @@ EOF
# technically /etc/synthetic.d/nix is supported in Big Sur+ # technically /etc/synthetic.d/nix is supported in Big Sur+
# but handling both takes even more code... # but handling both takes even more code...
_sudo "to add Nix to /etc/synthetic.conf" \ _sudo "to add Nix to /etc/synthetic.conf" \
/usr/bin/ex /etc/synthetic.conf <<EOF /usr/bin/ex --noplugin /etc/synthetic.conf <<EOF
:a :a
${NIX_ROOT:1} ${NIX_ROOT:1}
. .
@ -670,11 +686,15 @@ encrypt_volume() {
local volume_uuid="$1" local volume_uuid="$1"
local volume_label="$2" local volume_label="$2"
local password local password
task "Encrypt the Nix volume" >&2
# Note: mount/unmount are late additions to support the right order # Note: mount/unmount are late additions to support the right order
# of operations for creating the volume and then baking its uuid into # of operations for creating the volume and then baking its uuid into
# other artifacts; not as well-trod wrt to potential errors, race # other artifacts; not as well-trod wrt to potential errors, race
# conditions, etc. # conditions, etc.
_sudo "to mount your Nix volume for encrypting" \
/usr/sbin/diskutil mount "$volume_label" /usr/sbin/diskutil mount "$volume_label"
password="$(/usr/bin/xxd -l 32 -p -c 256 /dev/random)" password="$(/usr/bin/xxd -l 32 -p -c 256 /dev/random)"
@ -682,9 +702,10 @@ encrypt_volume() {
/usr/bin/security -i <<EOF /usr/bin/security -i <<EOF
add-generic-password -a "$volume_label" -s "$volume_uuid" -l "$volume_label encryption password" -D "Encrypted volume password" -j "Added automatically by the Nix installer for use by $NIX_VOLUME_MOUNTD_DEST" -w "$password" -T /System/Library/CoreServices/APFSUserAgent -T /System/Library/CoreServices/CSUserAgent -T /usr/bin/security "/Library/Keychains/System.keychain" add-generic-password -a "$volume_label" -s "$volume_uuid" -l "$volume_label encryption password" -D "Encrypted volume password" -j "Added automatically by the Nix installer for use by $NIX_VOLUME_MOUNTD_DEST" -w "$password" -T /System/Library/CoreServices/APFSUserAgent -T /System/Library/CoreServices/CSUserAgent -T /usr/bin/security "/Library/Keychains/System.keychain"
EOF EOF
builtin printf "%s" "$password" | _sudo "to encrypt your Nix volume" \ builtin printf "%s" "$password" | _sudo "to actually encrypt your Nix volume" \
/usr/sbin/diskutil apfs encryptVolume "$volume_label" -user disk -stdinpassphrase /usr/sbin/diskutil apfs encryptVolume "$volume_label" -user disk -stdinpassphrase
_sudo "to unmount the encrypted volume" \
/usr/sbin/diskutil unmount force "$volume_label" /usr/sbin/diskutil unmount force "$volume_label"
} }
@ -742,6 +763,9 @@ setup_volume() {
use_special="${NIX_VOLUME_USE_SPECIAL:-$(create_volume)}" use_special="${NIX_VOLUME_USE_SPECIAL:-$(create_volume)}"
_sudo "to ensure the Nix volume is not mounted" \
/usr/sbin/diskutil unmount force "$use_special" || true # might not be mounted
use_uuid=${NIX_VOLUME_USE_UUID:-$(volume_uuid_from_special "$use_special")} use_uuid=${NIX_VOLUME_USE_UUID:-$(volume_uuid_from_special "$use_special")}
setup_fstab "$use_uuid" setup_fstab "$use_uuid"
@ -791,7 +815,7 @@ setup_volume_daemon() {
local volume_uuid="$2" local volume_uuid="$2"
if ! test_voldaemon; then if ! test_voldaemon; then
task "Configuring LaunchDaemon to mount '$NIX_VOLUME_LABEL'" >&2 task "Configuring LaunchDaemon to mount '$NIX_VOLUME_LABEL'" >&2
_sudo "to install the Nix volume mounter" /usr/bin/ex "$NIX_VOLUME_MOUNTD_DEST" <<EOF _sudo "to install the Nix volume mounter" /usr/bin/ex --noplugin "$NIX_VOLUME_MOUNTD_DEST" <<EOF
:a :a
$(generate_mount_daemon "$cmd_type" "$volume_uuid") $(generate_mount_daemon "$cmd_type" "$volume_uuid")
. .

View file

@ -78,7 +78,7 @@ poly_service_installed_check() {
poly_service_uninstall_directions() { poly_service_uninstall_directions() {
echo "$1. Remove macOS-specific components:" echo "$1. Remove macOS-specific components:"
if should_create_volume && test_nix_volume_mountd_installed; then if should_create_volume && test_nix_volume_mountd_installed; then
darwin_volume_uninstall_directions nix_volume_mountd_uninstall_directions
fi fi
if test_nix_daemon_installed; then if test_nix_daemon_installed; then
nix_daemon_uninstall_directions nix_daemon_uninstall_directions
@ -218,7 +218,7 @@ EOF
setup_darwin_volume setup_darwin_volume
fi fi
if [ "$(diskutil info -plist /nix | xmllint --xpath "(/plist/dict/key[text()='GlobalPermissionsEnabled'])/following-sibling::*[1]" -)" = "<false/>" ]; then if [ "$(/usr/sbin/diskutil info -plist /nix | xmllint --xpath "(/plist/dict/key[text()='GlobalPermissionsEnabled'])/following-sibling::*[1]" -)" = "<false/>" ]; then
failure "This script needs a /nix volume with global permissions! This may require running sudo diskutil enableOwnership /nix." failure "This script needs a /nix volume with global permissions! This may require running sudo /usr/sbin/diskutil enableOwnership /nix."
fi fi
} }

View file

@ -23,10 +23,10 @@ readonly RED='\033[31m'
# installer allows overriding build user count to speed up installation # installer allows overriding build user count to speed up installation
# as creating each user takes non-trivial amount of time on macos # as creating each user takes non-trivial amount of time on macos
readonly NIX_USER_COUNT=${NIX_USER_COUNT:-32} readonly NIX_USER_COUNT=${NIX_USER_COUNT:-32}
readonly NIX_BUILD_GROUP_ID="30000" readonly NIX_BUILD_GROUP_ID="${NIX_BUILD_GROUP_ID:-30000}"
readonly NIX_BUILD_GROUP_NAME="nixbld" readonly NIX_BUILD_GROUP_NAME="nixbld"
# darwin installer needs to override these # darwin installer needs to override these
NIX_FIRST_BUILD_UID="30001" NIX_FIRST_BUILD_UID="${NIX_FIRST_BUILD_UID:-30001}"
NIX_BUILD_USER_NAME_TEMPLATE="nixbld%d" NIX_BUILD_USER_NAME_TEMPLATE="nixbld%d"
# Please don't change this. We don't support it, because the # Please don't change this. We don't support it, because the
# default shell profile that comes with Nix doesn't support it. # default shell profile that comes with Nix doesn't support it.
@ -377,6 +377,11 @@ cure_artifacts() {
} }
validate_starting_assumptions() { validate_starting_assumptions() {
task "Checking for artifacts of previous installs"
cat <<EOF
Before I try to install, I'll check for signs Nix already is or has
been installed on this system.
EOF
if type nix-env 2> /dev/null >&2; then if type nix-env 2> /dev/null >&2; then
warning <<EOF warning <<EOF
Nix already appears to be installed. This installer may run into issues. Nix already appears to be installed. This installer may run into issues.
@ -386,20 +391,34 @@ $(uninstall_directions)
EOF EOF
fi fi
# TODO: I think it would be good for this step to accumulate more
# knowledge of older obsolete artifacts, if there are any.
# We could issue a "reminder" here that the user might want
# to clean them up?
for profile_target in "${PROFILE_TARGETS[@]}"; do for profile_target in "${PROFILE_TARGETS[@]}"; do
# TODO: I think it would be good to accumulate a list of all
# of the copies so that people don't hit this 2 or 3x in
# a row for different files.
if [ -e "$profile_target$PROFILE_BACKUP_SUFFIX" ]; then if [ -e "$profile_target$PROFILE_BACKUP_SUFFIX" ]; then
# this backup process first released in Nix 2.1
failure <<EOF failure <<EOF
When this script runs, it backs up the current $profile_target to I back up shell profile/rc scripts before I add Nix to them.
$profile_target$PROFILE_BACKUP_SUFFIX. This backup file already exists, though. I need to back up $profile_target to $profile_target$PROFILE_BACKUP_SUFFIX,
but the latter already exists.
Please follow these instructions to clean up the old backup file: Here's how to clean up the old backup file:
1. Copy $profile_target and $profile_target$PROFILE_BACKUP_SUFFIX to another place, just 1. Back up (copy) $profile_target and $profile_target$PROFILE_BACKUP_SUFFIX
in case. to another location, just in case.
2. Take care to make sure that $profile_target$PROFILE_BACKUP_SUFFIX doesn't look like 2. Ensure $profile_target$PROFILE_BACKUP_SUFFIX does not have anything
it has anything nix-related in it. If it does, something is probably Nix-related in it. If it does, something is probably quite
quite wrong. Please open an issue or get in touch immediately. wrong. Please open an issue or get in touch immediately.
3. Once you confirm $profile_target is backed up and
$profile_target$PROFILE_BACKUP_SUFFIX doesn't mention Nix, run:
mv $profile_target$PROFILE_BACKUP_SUFFIX $profile_target
EOF EOF
fi fi
done done
@ -557,21 +576,40 @@ create_directories() {
# since this bit is cross-platform: # since this bit is cross-platform:
# - first try with `command -vp` to try and find # - first try with `command -vp` to try and find
# chown in the usual places # chown in the usual places
# * to work around some sort of deficiency in
# `command -p` in macOS bash 3.2, we also add
# PATH="$(getconf PATH 2>/dev/null)". As long as
# getconf is found, this should set a sane PATH
# which `command -p` in bash 3.2 appears to use.
# A bash with a properly-working `command -p`
# should ignore this hard-set PATH in favor of
# whatever it obtains internally. See
# github.com/NixOS/nix/issues/5768
# - fall back on `command -v` which would find # - fall back on `command -v` which would find
# any chown on path # any chown on path
# if we don't find one, the command is already # if we don't find one, the command is already
# hiding behind || true, and the general state # hiding behind || true, and the general state
# should be one the user can repair once they # should be one the user can repair once they
# figure out where chown is... # figure out where chown is...
local get_chr_own="$(command -vp chown)" local get_chr_own="$(PATH="$(getconf PATH 2>/dev/null)" command -vp chown)"
if [[ -z "$get_chr_own" ]]; then if [[ -z "$get_chr_own" ]]; then
get_chr_own="$(command -v chown)" get_chr_own="$(command -v chown)"
fi fi
if [[ -z "$get_chr_own" ]]; then
reminder <<EOF
I wanted to take root ownership of existing Nix store files,
but I couldn't locate 'chown'. (You may need to fix your PATH.)
To manually change file ownership, you can run:
sudo chown -R 'root:$NIX_BUILD_GROUP_NAME' '$NIX_ROOT'
EOF
else
_sudo "to take root ownership of existing Nix store files" \ _sudo "to take root ownership of existing Nix store files" \
"$get_chr_own" -R "root:$NIX_BUILD_GROUP_NAME" "$NIX_ROOT" || true "$get_chr_own" -R "root:$NIX_BUILD_GROUP_NAME" "$NIX_ROOT" || true
fi fi
fi
_sudo "to make the basic directory structure of Nix (part 1)" \ _sudo "to make the basic directory structure of Nix (part 1)" \
install -dv -m 0755 /nix /nix/var /nix/var/log /nix/var/log/nix /nix/var/log/nix/drvs /nix/var/nix{,/db,/gcroots,/profiles,/temproots,/userpool} /nix/var/nix/{gcroots,profiles}/per-user install -dv -m 0755 /nix /nix/var /nix/var/log /nix/var/log/nix /nix/var/log/nix/drvs /nix/var/nix{,/db,/gcroots,/profiles,/temproots,/userpool,/daemon-socket} /nix/var/nix/{gcroots,profiles}/per-user
_sudo "to make the basic directory structure of Nix (part 2)" \ _sudo "to make the basic directory structure of Nix (part 2)" \
install -dv -g "$NIX_BUILD_GROUP_NAME" -m 1775 /nix/store install -dv -g "$NIX_BUILD_GROUP_NAME" -m 1775 /nix/store
@ -599,7 +637,7 @@ manager. This will happen in a few stages:
1. Make sure your computer doesn't already have Nix. If it does, I 1. Make sure your computer doesn't already have Nix. If it does, I
will show you instructions on how to clean up your old install. will show you instructions on how to clean up your old install.
2. Show you what we are going to install and where. Then we will ask 2. Show you what I am going to install and where. Then I will ask
if you are ready to continue. if you are ready to continue.
3. Create the system users and groups that the Nix daemon uses to run 3. Create the system users and groups that the Nix daemon uses to run
@ -614,14 +652,14 @@ manager. This will happen in a few stages:
EOF EOF
if ui_confirm "Would you like to see a more detailed list of what we will do?"; then if ui_confirm "Would you like to see a more detailed list of what I will do?"; then
cat <<EOF cat <<EOF
We will: I will:
- make sure your computer doesn't already have Nix files - make sure your computer doesn't already have Nix files
(if it does, I will tell you how to clean them up.) (if it does, I will tell you how to clean them up.)
- create local users (see the list above for the users we'll make) - create local users (see the list above for the users I'll make)
- create a local group ($NIX_BUILD_GROUP_NAME) - create a local group ($NIX_BUILD_GROUP_NAME)
- install Nix in to $NIX_ROOT - install Nix in to $NIX_ROOT
- create a configuration file in /etc/nix - create a configuration file in /etc/nix
@ -656,7 +694,7 @@ run in a headless fashion, like this:
$ curl -L https://nixos.org/nix/install | sh $ curl -L https://nixos.org/nix/install | sh
or maybe in a CI pipeline. Because of that, we're going to skip the or maybe in a CI pipeline. Because of that, I'm going to skip the
verbose output in the interest of brevity. verbose output in the interest of brevity.
If you would like to If you would like to
@ -670,7 +708,7 @@ EOF
fi fi
cat <<EOF cat <<EOF
This script is going to call sudo a lot. Every time we do, it'll This script is going to call sudo a lot. Every time I do, it'll
output exactly what it'll do, and why. output exactly what it'll do, and why.
Just like this: Just like this:
@ -682,15 +720,15 @@ EOF
cat <<EOF cat <<EOF
This might look scary, but everything can be undone by running just a This might look scary, but everything can be undone by running just a
few commands. We used to ask you to confirm each time sudo ran, but it few commands. I used to ask you to confirm each time sudo ran, but it
was too many times. Instead, I'll just ask you this one time: was too many times. Instead, I'll just ask you this one time:
EOF EOF
if ui_confirm "Can we use sudo?"; then if ui_confirm "Can I use sudo?"; then
ok "Yay! Thanks! Let's get going!" ok "Yay! Thanks! Let's get going!"
else else
failure <<EOF failure <<EOF
That is okay, but we can't install. That is okay, but I can't install.
EOF EOF
fi fi
} }
@ -809,10 +847,10 @@ main() {
# can fail faster in this case. Sourcing install-darwin... now runs # can fail faster in this case. Sourcing install-darwin... now runs
# `touch /` to detect Read-only root, but it could update times on # `touch /` to detect Read-only root, but it could update times on
# pre-Catalina macOS if run as root user. # pre-Catalina macOS if run as root user.
if [ $EUID -eq 0 ]; then if [ "$EUID" -eq 0 ]; then
failure <<EOF failure <<EOF
Please do not run this script with root privileges. We will call sudo Please do not run this script with root privileges. I will call sudo
when we need to. when I need to.
EOF EOF
fi fi

View file

@ -38,7 +38,7 @@ fi
# Determine if we could use the multi-user installer or not # Determine if we could use the multi-user installer or not
if [ "$(uname -s)" = "Linux" ]; then if [ "$(uname -s)" = "Linux" ]; then
echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2 echo "Note: a multi-user installation is possible. See https://nixos.org/manual/nix/stable/installation/installing-binary.html#multi-user-installation" >&2
fi fi
case "$(uname -s)" in case "$(uname -s)" in
@ -98,7 +98,7 @@ while [ $# -gt 0 ]; do
echo " providing multi-user support and better isolation for local builds." echo " providing multi-user support and better isolation for local builds."
echo " Both for security and reproducibility, this method is recommended if" echo " Both for security and reproducibility, this method is recommended if"
echo " supported on your platform." echo " supported on your platform."
echo " See https://nixos.org/nix/manual/#sect-multi-user-installation" echo " See https://nixos.org/manual/nix/stable/installation/installing-binary.html#multi-user-installation"
echo "" echo ""
echo " --no-daemon: Simple, single-user installation that does not require root and is" echo " --no-daemon: Simple, single-user installation that does not require root and is"
echo " trivial to uninstall." echo " trivial to uninstall."
@ -134,7 +134,7 @@ fi
echo "performing a single-user installation of Nix..." >&2 echo "performing a single-user installation of Nix..." >&2
if ! [ -e $dest ]; then if ! [ -e "$dest" ]; then
cmd="mkdir -m 0755 $dest && chown $USER $dest" cmd="mkdir -m 0755 $dest && chown $USER $dest"
echo "directory $dest does not exist; creating it by running '$cmd' using sudo" >&2 echo "directory $dest does not exist; creating it by running '$cmd' using sudo" >&2
if ! sudo sh -c "$cmd"; then if ! sudo sh -c "$cmd"; then
@ -143,12 +143,12 @@ if ! [ -e $dest ]; then
fi fi
fi fi
if ! [ -w $dest ]; then if ! [ -w "$dest" ]; then
echo "$0: directory $dest exists, but is not writable by you. This could indicate that another user has already performed a single-user installation of Nix on this system. If you wish to enable multi-user support see https://nixos.org/nix/manual/#ssec-multi-user. If you wish to continue with a single-user install for $USER please run 'chown -R $USER $dest' as root." >&2 echo "$0: directory $dest exists, but is not writable by you. This could indicate that another user has already performed a single-user installation of Nix on this system. If you wish to enable multi-user support see https://nixos.org/manual/nix/stable/installation/multi-user.html. If you wish to continue with a single-user install for $USER please run 'chown -R $USER $dest' as root." >&2
exit 1 exit 1
fi fi
mkdir -p $dest/store mkdir -p "$dest/store"
printf "copying Nix to %s..." "${dest}/store" >&2 printf "copying Nix to %s..." "${dest}/store" >&2
# Insert a newline if no progress is shown. # Insert a newline if no progress is shown.
@ -189,17 +189,17 @@ fi
# Install an SSL certificate bundle. # Install an SSL certificate bundle.
if [ -z "$NIX_SSL_CERT_FILE" ] || ! [ -f "$NIX_SSL_CERT_FILE" ]; then if [ -z "$NIX_SSL_CERT_FILE" ] || ! [ -f "$NIX_SSL_CERT_FILE" ]; then
$nix/bin/nix-env -i "$cacert" "$nix/bin/nix-env" -i "$cacert"
export NIX_SSL_CERT_FILE="$HOME/.nix-profile/etc/ssl/certs/ca-bundle.crt" export NIX_SSL_CERT_FILE="$HOME/.nix-profile/etc/ssl/certs/ca-bundle.crt"
fi fi
# Subscribe the user to the Nixpkgs channel and fetch it. # Subscribe the user to the Nixpkgs channel and fetch it.
if [ -z "$NIX_INSTALLER_NO_CHANNEL_ADD" ]; then if [ -z "$NIX_INSTALLER_NO_CHANNEL_ADD" ]; then
if ! $nix/bin/nix-channel --list | grep -q "^nixpkgs "; then if ! "$nix/bin/nix-channel" --list | grep -q "^nixpkgs "; then
$nix/bin/nix-channel --add https://nixos.org/channels/nixpkgs-unstable "$nix/bin/nix-channel" --add https://nixos.org/channels/nixpkgs-unstable
fi fi
if [ -z "$_NIX_INSTALLER_TEST" ]; then if [ -z "$_NIX_INSTALLER_TEST" ]; then
if ! $nix/bin/nix-channel --update nixpkgs; then if ! "$nix/bin/nix-channel" --update nixpkgs; then
echo "Fetching the nixpkgs channel failed. (Are you offline?)" echo "Fetching the nixpkgs channel failed. (Are you offline?)"
echo "To try again later, run \"nix-channel --update nixpkgs\"." echo "To try again later, run \"nix-channel --update nixpkgs\"."
fi fi
@ -215,7 +215,7 @@ if [ -z "$NIX_INSTALLER_NO_MODIFY_PROFILE" ]; then
if [ -w "$fn" ]; then if [ -w "$fn" ]; then
if ! grep -q "$p" "$fn"; then if ! grep -q "$p" "$fn"; then
echo "modifying $fn..." >&2 echo "modifying $fn..." >&2
echo -e "\nif [ -e $p ]; then . $p; fi # added by Nix installer" >> "$fn" printf '\nif [ -e %s ]; then . %s; fi # added by Nix installer\n' "$p" "$p" >> "$fn"
fi fi
added=1 added=1
break break
@ -226,7 +226,7 @@ if [ -z "$NIX_INSTALLER_NO_MODIFY_PROFILE" ]; then
if [ -w "$fn" ]; then if [ -w "$fn" ]; then
if ! grep -q "$p" "$fn"; then if ! grep -q "$p" "$fn"; then
echo "modifying $fn..." >&2 echo "modifying $fn..." >&2
echo -e "\nif [ -e $p ]; then . $p; fi # added by Nix installer" >> "$fn" printf '\nif [ -e %s ]; then . %s; fi # added by Nix installer\n' "$p" "$p" >> "$fn"
fi fi
added=1 added=1
break break

View file

@ -15,7 +15,7 @@ readonly SERVICE_OVERRIDE=${SERVICE_DEST}.d/override.conf
create_systemd_override() { create_systemd_override() {
header "Configuring proxy for the nix-daemon service" header "Configuring proxy for the nix-daemon service"
_sudo "create directory for systemd unit override" mkdir -p "$(dirname $SERVICE_OVERRIDE)" _sudo "create directory for systemd unit override" mkdir -p "$(dirname "$SERVICE_OVERRIDE")"
cat <<EOF | _sudo "create systemd unit override" tee "$SERVICE_OVERRIDE" cat <<EOF | _sudo "create systemd unit override" tee "$SERVICE_OVERRIDE"
[Service] [Service]
$1 $1

View file

@ -81,10 +81,10 @@ if [ "$(uname -s)" != "Darwin" ]; then
require_util xz "unpack the binary tarball" require_util xz "unpack the binary tarball"
fi fi
if command -v wget > /dev/null 2>&1; then if command -v curl > /dev/null 2>&1; then
fetch() { wget "$1" -O "$2"; }
elif command -v curl > /dev/null 2>&1; then
fetch() { curl -L "$1" -o "$2"; } fetch() { curl -L "$1" -o "$2"; }
elif command -v wget > /dev/null 2>&1; then
fetch() { wget "$1" -O "$2"; }
else else
oops "you don't have wget or curl installed, which I need to download the binary tarball" oops "you don't have wget or curl installed, which I need to download the binary tarball"
fi fi

View file

@ -1,7 +1,5 @@
nix_noinst_scripts := \ nix_noinst_scripts := \
$(d)/nix-http-export.cgi \ $(d)/nix-profile.sh
$(d)/nix-profile.sh \
$(d)/nix-reduce-build
noinst-scripts += $(nix_noinst_scripts) noinst-scripts += $(nix_noinst_scripts)

View file

@ -1,51 +0,0 @@
#! /bin/sh
export HOME=/tmp
export NIX_REMOTE=daemon
TMP_DIR="${TMP_DIR:-/tmp/nix-export}"
@coreutils@/mkdir -p "$TMP_DIR" || true
@coreutils@/chmod a+r "$TMP_DIR"
needed_path="?$QUERY_STRING"
needed_path="${needed_path#*[?&]needed_path=}"
needed_path="${needed_path%%&*}"
#needed_path="$(echo $needed_path | ./unhttp)"
needed_path="${needed_path//%2B/+}"
needed_path="${needed_path//%3D/=}"
echo needed_path: "$needed_path" >&2
NIX_STORE="${NIX_STORE_DIR:-/nix/store}"
echo NIX_STORE: "${NIX_STORE}" >&2
full_path="${NIX_STORE}"/"$needed_path"
if [ "$needed_path" != "${needed_path%.drv}" ]; then
echo "Status: 403 You should create the derivation file yourself"
echo "Content-Type: text/plain"
echo
echo "Refusing to disclose derivation contents"
exit
fi
if @bindir@/nix-store --check-validity "$full_path"; then
if ! [ -e nix-export/"$needed_path".nar.gz ]; then
@bindir@/nix-store --export "$full_path" | @gzip@ > "$TMP_DIR"/"$needed_path".nar.gz
@coreutils@/ln -fs "$TMP_DIR"/"$needed_path".nar.gz nix-export/"$needed_path".nar.gz
fi;
echo "Status: 301 Moved"
echo "Location: nix-export/"$needed_path".nar.gz"
echo
else
echo "Status: 404 No such path found"
echo "Content-Type: text/plain"
echo
echo "Path not found:"
echo "$needed_path"
echo "checked:"
echo "$full_path"
fi

View file

@ -5,7 +5,7 @@ __ETC_PROFILE_NIX_SOURCED=1
export NIX_PROFILES="@localstatedir@/nix/profiles/default $HOME/.nix-profile" export NIX_PROFILES="@localstatedir@/nix/profiles/default $HOME/.nix-profile"
# Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work. # Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work.
if [ ! -z "${NIX_SSL_CERT_FILE:-}" ]; then if [ -n "${NIX_SSL_CERT_FILE:-}" ]; then
: # Allow users to override the NIX_SSL_CERT_FILE : # Allow users to override the NIX_SSL_CERT_FILE
elif [ -e /etc/ssl/certs/ca-certificates.crt ]; then # NixOS, Ubuntu, Debian, Gentoo, Arch elif [ -e /etc/ssl/certs/ca-certificates.crt ]; then # NixOS, Ubuntu, Debian, Gentoo, Arch
export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
@ -18,14 +18,14 @@ elif [ -e /etc/pki/tls/certs/ca-bundle.crt ]; then # Fedora, CentOS
else else
# Fall back to what is in the nix profiles, favouring whatever is defined last. # Fall back to what is in the nix profiles, favouring whatever is defined last.
check_nix_profiles() { check_nix_profiles() {
if [ "$ZSH_VERSION" ]; then if [ -n "$ZSH_VERSION" ]; then
# Zsh by default doesn't split words in unquoted parameter expansion. # Zsh by default doesn't split words in unquoted parameter expansion.
# Set local_options for these options to be reverted at the end of the function # Set local_options for these options to be reverted at the end of the function
# and shwordsplit to force splitting words in $NIX_PROFILES below. # and shwordsplit to force splitting words in $NIX_PROFILES below.
setopt local_options shwordsplit setopt local_options shwordsplit
fi fi
for i in $NIX_PROFILES; do for i in $NIX_PROFILES; do
if [ -e $i/etc/ssl/certs/ca-bundle.crt ]; then if [ -e "$i/etc/ssl/certs/ca-bundle.crt" ]; then
export NIX_SSL_CERT_FILE=$i/etc/ssl/certs/ca-bundle.crt export NIX_SSL_CERT_FILE=$i/etc/ssl/certs/ca-bundle.crt
fi fi
done done

View file

@ -24,6 +24,9 @@ if [ -n "$HOME" ] && [ -n "$USER" ]; then
export NIX_SSL_CERT_FILE="$NIX_LINK/etc/ca-bundle.crt" export NIX_SSL_CERT_FILE="$NIX_LINK/etc/ca-bundle.crt"
fi fi
# Only use MANPATH if it is already set. In general `man` will simply
# pick up `.nix-profile/share/man` because it is close to `.nix-profile/bin`
# which is in the $PATH. For more info, run `manpath -d`.
if [ -n "${MANPATH-}" ]; then if [ -n "${MANPATH-}" ]; then
export MANPATH="$NIX_LINK/share/man:$MANPATH" export MANPATH="$NIX_LINK/share/man:$MANPATH"
fi fi

View file

@ -1,171 +0,0 @@
#! @bash@
WORKING_DIRECTORY=$(mktemp -d "${TMPDIR:-/tmp}"/nix-reduce-build-XXXXXX);
cd "$WORKING_DIRECTORY";
if test -z "$1" || test "a--help" = "a$1" ; then
echo 'nix-reduce-build (paths or Nix expressions) -- (package sources)' >&2
echo As in: >&2
echo nix-reduce-build /etc/nixos/nixos -- ssh://user@somewhere.nowhere.example.org >&2
echo nix-reduce-build /etc/nixos/nixos -- \\
echo " " \''http://somewhere.nowhere.example.org/nix/nix-http-export.cgi?needed_path='\' >&2
echo " store path name will be added into the end of the URL" >&2
echo nix-reduce-build /etc/nixos/nixos -- file://home/user/nar/ >&2
echo " that should be a directory where gzipped 'nix-store --export' ">&2
echo " files are located (they should have .nar.gz extension)" >&2
echo " Or all together: " >&2
echo -e nix-reduce-build /expr.nix /e2.nix -- \\\\\\\n\
" ssh://a@b.example.com http://n.example.com/get-nar?q= file://nar/" >&2
echo " Also supports best-effort local builds of failing expression set:" >&2
echo "nix-reduce-build /e.nix -- nix-daemon:// nix-self://" >&2
echo " nix-daemon:// builds using daemon"
echo " nix-self:// builds directly using nix-store from current installation" >&2
echo " nix-daemon-fixed:// and nix-self-fixed:// do the same, but only for" >&2;
echo "derivations with specified output hash (sha256, sha1 or md5)." >&2
echo " nix-daemon-substitute:// and nix-self-substitute:// try to substitute" >&2;
echo "maximum amount of paths" >&2;
echo " nix-daemon-build:// and nix-self-build:// try to build (not substitute)" >&2;
echo "maximum amount of paths" >&2;
echo " If no package sources are specified, required paths are listed." >&2;
exit;
fi;
while ! test "$1" = "--" || test "$1" = "" ; do
echo "$1" >> initial; >&2
shift;
done
shift;
echo Will work on $(cat initial | wc -l) targets. >&2
while read ; do
case "$REPLY" in
${NIX_STORE_DIR:-/nix/store}/*)
echo "$REPLY" >> paths; >&2
;;
*)
(
IFS=: ;
nix-instantiate $REPLY >> paths;
);
;;
esac;
done < initial;
echo Proceeding $(cat paths | wc -l) paths. >&2
while read; do
case "$REPLY" in
*.drv)
echo "$REPLY" >> derivers; >&2
;;
*)
nix-store --query --deriver "$REPLY" >>derivers;
;;
esac;
done < paths;
echo Found $(cat derivers | wc -l) derivers. >&2
cat derivers | xargs nix-store --query -R > derivers-closure;
echo Proceeding at most $(cat derivers-closure | wc -l) derivers. >&2
cat derivers-closure | egrep '[.]drv$' | xargs nix-store --query --outputs > wanted-paths;
cat derivers-closure | egrep -v '[.]drv$' >> wanted-paths;
echo Prepared $(cat wanted-paths | wc -l) paths to get. >&2
cat wanted-paths | xargs nix-store --check-validity --print-invalid > needed-paths;
echo We need $(cat needed-paths | wc -l) paths. >&2
egrep '[.]drv$' derivers-closure > critical-derivers;
if test -z "$1" ; then
cat needed-paths;
fi;
refresh_critical_derivers() {
echo "Finding needed derivers..." >&2;
cat critical-derivers | while read; do
if ! (nix-store --query --outputs "$REPLY" | xargs nix-store --check-validity &> /dev/null;); then
echo "$REPLY";
fi;
done > new-critical-derivers;
mv new-critical-derivers critical-derivers;
echo The needed paths are realized by $(cat critical-derivers | wc -l) derivers. >&2
}
build_here() {
cat critical-derivers | while read; do
echo "Realising $REPLY using nix-daemon" >&2
@bindir@/nix-store -r "${REPLY}"
done;
}
try_to_substitute(){
cat needed-paths | while read ; do
echo "Building $REPLY using nix-daemon" >&2
@bindir@/nix-store -r "${NIX_STORE_DIR:-/nix/store}/${REPLY##*/}"
done;
}
for i in "$@"; do
sshHost="${i#ssh://}";
httpHost="${i#http://}";
httpsHost="${i#https://}";
filePath="${i#file:/}";
if [ "$i" != "$sshHost" ]; then
cat needed-paths | while read; do
echo "Getting $REPLY and its closure over ssh" >&2
nix-copy-closure --from "$sshHost" --gzip "$REPLY" </dev/null || true;
done;
elif [ "$i" != "$httpHost" ] || [ "$i" != "$httpsHost" ]; then
cat needed-paths | while read; do
echo "Getting $REPLY over http/https" >&2
curl ${BAD_CERTIFICATE:+-k} -L "$i${REPLY##*/}" | gunzip | nix-store --import;
done;
elif [ "$i" != "$filePath" ] ; then
cat needed-paths | while read; do
echo "Installing $REPLY from file" >&2
gunzip < "$filePath/${REPLY##*/}".nar.gz | nix-store --import;
done;
elif [ "$i" = "nix-daemon://" ] ; then
NIX_REMOTE=daemon try_to_substitute;
refresh_critical_derivers;
NIX_REMOTE=daemon build_here;
elif [ "$i" = "nix-self://" ] ; then
NIX_REMOTE= try_to_substitute;
refresh_critical_derivers;
NIX_REMOTE= build_here;
elif [ "$i" = "nix-daemon-fixed://" ] ; then
refresh_critical_derivers;
cat critical-derivers | while read; do
if egrep '"(md5|sha1|sha256)"' "$REPLY" &>/dev/null; then
echo "Realising $REPLY using nix-daemon" >&2
NIX_REMOTE=daemon @bindir@/nix-store -r "${REPLY}"
fi;
done;
elif [ "$i" = "nix-self-fixed://" ] ; then
refresh_critical_derivers;
cat critical-derivers | while read; do
if egrep '"(md5|sha1|sha256)"' "$REPLY" &>/dev/null; then
echo "Realising $REPLY using direct Nix build" >&2
NIX_REMOTE= @bindir@/nix-store -r "${REPLY}"
fi;
done;
elif [ "$i" = "nix-daemon-substitute://" ] ; then
NIX_REMOTE=daemon try_to_substitute;
elif [ "$i" = "nix-self-substitute://" ] ; then
NIX_REMOTE= try_to_substitute;
elif [ "$i" = "nix-daemon-build://" ] ; then
refresh_critical_derivers;
NIX_REMOTE=daemon build_here;
elif [ "$i" = "nix-self-build://" ] ; then
refresh_critical_derivers;
NIX_REMOTE= build_here;
fi;
mv needed-paths wanted-paths;
cat wanted-paths | xargs nix-store --check-validity --print-invalid > needed-paths;
echo We still need $(cat needed-paths | wc -l) paths. >&2
done;
cd /
rm -r "$WORKING_DIRECTORY"

View file

@ -3,7 +3,7 @@
set -e set -e
script=$(nix-build -A outputs.hydraJobs.installerScriptForGHA --no-out-link) script=$(nix-build -A outputs.hydraJobs.installerScriptForGHA --no-out-link)
installerHash=$(echo $script | cut -b12-43 -) installerHash=$(echo "$script" | cut -b12-43 -)
installerURL=https://$CACHIX_NAME.cachix.org/serve/$installerHash/install installerURL=https://$CACHIX_NAME.cachix.org/serve/$installerHash/install

View file

@ -1,3 +1,3 @@
(import (fetchTarball https://github.com/edolstra/flake-compat/archive/master.tar.gz) { (import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") {
src = ./.; src = ./.;
}).shellNix }).shellNix

View file

@ -14,10 +14,12 @@
#include "pathlocks.hh" #include "pathlocks.hh"
#include "globals.hh" #include "globals.hh"
#include "serialise.hh" #include "serialise.hh"
#include "build-result.hh"
#include "store-api.hh" #include "store-api.hh"
#include "derivations.hh" #include "derivations.hh"
#include "local-store.hh" #include "local-store.hh"
#include "legacy.hh" #include "legacy.hh"
#include "experimental-features.hh"
using namespace nix; using namespace nix;
using std::cin; using std::cin;
@ -31,7 +33,7 @@ std::string escapeUri(std::string uri)
return uri; return uri;
} }
static string currentLoad; static std::string currentLoad;
static AutoCloseFD openSlotLock(const Machine & m, uint64_t slot) static AutoCloseFD openSlotLock(const Machine & m, uint64_t slot)
{ {
@ -96,7 +98,7 @@ static int main_build_remote(int argc, char * * argv)
} }
std::optional<StorePath> drvPath; std::optional<StorePath> drvPath;
string storeUri; std::string storeUri;
while (true) { while (true) {
@ -130,11 +132,14 @@ static int main_build_remote(int argc, char * * argv)
for (auto & m : machines) { for (auto & m : machines) {
debug("considering building on remote machine '%s'", m.storeUri); debug("considering building on remote machine '%s'", m.storeUri);
if (m.enabled && std::find(m.systemTypes.begin(), if (m.enabled
&& (neededSystem == "builtin"
|| std::find(m.systemTypes.begin(),
m.systemTypes.end(), m.systemTypes.end(),
neededSystem) != m.systemTypes.end() && neededSystem) != m.systemTypes.end()) &&
m.allSupported(requiredFeatures) && m.allSupported(requiredFeatures) &&
m.mandatoryMet(requiredFeatures)) { m.mandatoryMet(requiredFeatures))
{
rightType = true; rightType = true;
AutoCloseFD free; AutoCloseFD free;
uint64_t load = 0; uint64_t load = 0;
@ -179,7 +184,7 @@ static int main_build_remote(int argc, char * * argv)
else else
{ {
// build the hint template. // build the hint template.
string errorText = std::string errorText =
"Failed to find a machine for remote build!\n" "Failed to find a machine for remote build!\n"
"derivation: %s\nrequired (system, features): (%s, %s)"; "derivation: %s\nrequired (system, features): (%s, %s)";
errorText += "\n%s available machines:"; errorText += "\n%s available machines:";
@ -189,7 +194,7 @@ static int main_build_remote(int argc, char * * argv)
errorText += "\n(%s, %s, %s, %s)"; errorText += "\n(%s, %s, %s, %s)";
// add the template values. // add the template values.
string drvstr; std::string drvstr;
if (drvPath.has_value()) if (drvPath.has_value())
drvstr = drvPath->to_string(); drvstr = drvPath->to_string();
else else
@ -204,7 +209,7 @@ static int main_build_remote(int argc, char * * argv)
for (auto & m : machines) for (auto & m : machines)
error error
% concatStringsSep<vector<string>>(", ", m.systemTypes) % concatStringsSep<std::vector<std::string>>(", ", m.systemTypes)
% m.maxJobs % m.maxJobs
% concatStringsSep<StringSet>(", ", m.supportedFeatures) % concatStringsSep<StringSet>(", ", m.supportedFeatures)
% concatStringsSep<StringSet>(", ", m.mandatoryFeatures); % concatStringsSep<StringSet>(", ", m.mandatoryFeatures);
@ -295,7 +300,7 @@ connected:
std::set<Realisation> missingRealisations; std::set<Realisation> missingRealisations;
StorePathSet missingPaths; StorePathSet missingPaths;
if (settings.isExperimentalFeatureEnabled("ca-derivations") && !derivationHasKnownOutputPaths(drv.type())) { if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations) && !derivationHasKnownOutputPaths(drv.type())) {
for (auto & outputName : wantedOutputs) { for (auto & outputName : wantedOutputs) {
auto thisOutputHash = outputHashes.at(outputName); auto thisOutputHash = outputHashes.at(outputName);
auto thisOutputId = DrvOutput{ thisOutputHash, outputName }; auto thisOutputId = DrvOutput{ thisOutputHash, outputName };
@ -327,7 +332,7 @@ connected:
for (auto & realisation : missingRealisations) { for (auto & realisation : missingRealisations) {
// Should hold, because if the feature isn't enabled the set // Should hold, because if the feature isn't enabled the set
// of missing realisations should be empty // of missing realisations should be empty
settings.requireExperimentalFeature("ca-derivations"); settings.requireExperimentalFeature(Xp::CaDerivations);
store->registerDrvOutput(realisation); store->registerDrvOutput(realisation);
} }

View file

@ -1,18 +0,0 @@
Copyright (c) 2014 Chase Geigle
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

File diff suppressed because it is too large

View file

@ -54,6 +54,36 @@ void StoreCommand::run()
run(getStore()); run(getStore());
} }
CopyCommand::CopyCommand()
{
addFlag({
.longName = "from",
.description = "URL of the source Nix store.",
.labels = {"store-uri"},
.handler = {&srcUri},
});
addFlag({
.longName = "to",
.description = "URL of the destination Nix store.",
.labels = {"store-uri"},
.handler = {&dstUri},
});
}
ref<Store> CopyCommand::createStore()
{
return srcUri.empty() ? StoreCommand::createStore() : openStore(srcUri);
}
ref<Store> CopyCommand::getDstStore()
{
if (srcUri.empty() && dstUri.empty())
throw UsageError("you must pass '--from' and/or '--to'");
return dstUri.empty() ? openStore() : openStore(dstUri);
}
EvalCommand::EvalCommand() EvalCommand::EvalCommand()
{ {
} }
@ -74,7 +104,15 @@ ref<Store> EvalCommand::getEvalStore()
ref<EvalState> EvalCommand::getEvalState() ref<EvalState> EvalCommand::getEvalState()
{ {
if (!evalState) if (!evalState)
evalState = std::make_shared<EvalState>(searchPath, getEvalStore(), getStore()); evalState =
#if HAVE_BOEHMGC
std::allocate_shared<EvalState>(traceable_allocator<EvalState>(),
searchPath, getEvalStore(), getStore())
#else
std::make_shared<EvalState>(
searchPath, getEvalStore(), getStore())
#endif
;
return ref<EvalState>(evalState); return ref<EvalState>(evalState);
} }
@ -115,7 +153,7 @@ void BuiltPathsCommand::run(ref<Store> store)
for (auto & p : store->queryAllValidPaths()) for (auto & p : store->queryAllValidPaths())
paths.push_back(BuiltPath::Opaque{p}); paths.push_back(BuiltPath::Opaque{p});
} else { } else {
paths = toBuiltPaths(getEvalStore(), store, realiseMode, operateOn, installables); paths = Installable::toBuiltPaths(getEvalStore(), store, realiseMode, operateOn, installables);
if (recursive) { if (recursive) {
// XXX: This only computes the store path closure, ignoring // XXX: This only computes the store path closure, ignoring
// intermediate realisations // intermediate realisations
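Commands deriving from the new CopyCommand (for example `nix copy`) gain the --from/--to pair naming the source and destination stores, and getDstStore insists that at least one of the two is supplied. A rough usage sketch; the store URLs and store path are illustrative:

# Push an existing store path into a local file:// binary cache:
nix copy --to 'file:///tmp/cache' /nix/store/<hash>-hello-2.10
# Copy between two stores, naming both ends explicitly:
nix copy --from 'https://cache.nixos.org' --to 'ssh://build-host' /nix/store/<hash>-hello-2.10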

View file

@ -5,7 +5,6 @@
#include "common-eval-args.hh" #include "common-eval-args.hh"
#include "path.hh" #include "path.hh"
#include "flake/lockfile.hh" #include "flake/lockfile.hh"
#include "store-api.hh"
#include <optional> #include <optional>
@ -43,6 +42,19 @@ private:
std::shared_ptr<Store> _store; std::shared_ptr<Store> _store;
}; };
/* A command that copies something between `--from` and `--to`
stores. */
struct CopyCommand : virtual StoreCommand
{
std::string srcUri, dstUri;
CopyCommand();
ref<Store> createStore() override;
ref<Store> getDstStore();
};
struct EvalCommand : virtual StoreCommand, MixEvalArgs struct EvalCommand : virtual StoreCommand, MixEvalArgs
{ {
EvalCommand(); EvalCommand();
@ -69,14 +81,6 @@ struct MixFlakeOptions : virtual Args, EvalCommand
{ return {}; } { return {}; }
}; };
/* How to handle derivations in commands that operate on store paths. */
enum class OperateOn {
/* Operate on the output path. */
Output,
/* Operate on the .drv path. */
Derivation
};
struct SourceExprCommand : virtual Args, MixFlakeOptions struct SourceExprCommand : virtual Args, MixFlakeOptions
{ {
std::optional<Path> file; std::optional<Path> file;
@ -100,19 +104,6 @@ struct SourceExprCommand : virtual Args, MixFlakeOptions
void completeInstallable(std::string_view prefix); void completeInstallable(std::string_view prefix);
}; };
enum class Realise {
/* Build the derivation. Postcondition: the
derivation outputs exist. */
Outputs,
/* Don't build the derivation. Postcondition: the store derivation
exists. */
Derivation,
/* Evaluate in dry-run mode. Postcondition: nothing. */
// FIXME: currently unused, but could be revived if we can
// evaluate derivations in-memory.
Nothing
};
/* A command that operates on a list of "installables", which can be /* A command that operates on a list of "installables", which can be
store paths, attribute paths, Nix expressions, etc. */ store paths, attribute paths, Nix expressions, etc. */
struct InstallablesCommand : virtual Args, SourceExprCommand struct InstallablesCommand : virtual Args, SourceExprCommand
@ -225,38 +216,6 @@ static RegisterCommand registerCommand2(std::vector<std::string> && name)
return RegisterCommand(std::move(name), [](){ return make_ref<T>(); }); return RegisterCommand(std::move(name), [](){ return make_ref<T>(); });
} }
BuiltPaths build(
ref<Store> evalStore,
ref<Store> store, Realise mode,
const std::vector<std::shared_ptr<Installable>> & installables,
BuildMode bMode = bmNormal);
std::set<StorePath> toStorePaths(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
OperateOn operateOn,
const std::vector<std::shared_ptr<Installable>> & installables);
StorePath toStorePath(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
OperateOn operateOn,
std::shared_ptr<Installable> installable);
std::set<StorePath> toDerivations(
ref<Store> store,
const std::vector<std::shared_ptr<Installable>> & installables,
bool useDeriver = false);
BuiltPaths toBuiltPaths(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
OperateOn operateOn,
const std::vector<std::shared_ptr<Installable>> & installables);
/* Helper function to generate args that invoke $EDITOR on /* Helper function to generate args that invoke $EDITOR on
filename:lineno. */ filename:lineno. */
Strings editorFor(const Pos & pos); Strings editorFor(const Pos & pos);

View file

@ -12,6 +12,7 @@
#include "eval-cache.hh" #include "eval-cache.hh"
#include "url.hh" #include "url.hh"
#include "registry.hh" #include "registry.hh"
#include "build-result.hh"
#include <regex> #include <regex>
#include <queue> #include <queue>
@ -97,7 +98,7 @@ MixFlakeOptions::MixFlakeOptions()
lockFlags.writeLockFile = false; lockFlags.writeLockFile = false;
lockFlags.inputOverrides.insert_or_assign( lockFlags.inputOverrides.insert_or_assign(
flake::parseInputPath(inputPath), flake::parseInputPath(inputPath),
parseFlakeRef(flakeRef, absPath("."))); parseFlakeRef(flakeRef, absPath("."), true));
}} }}
}); });
@ -158,7 +159,10 @@ SourceExprCommand::SourceExprCommand()
Strings SourceExprCommand::getDefaultFlakeAttrPaths() Strings SourceExprCommand::getDefaultFlakeAttrPaths()
{ {
return {"defaultPackage." + settings.thisSystem.get()}; return {
"packages." + settings.thisSystem.get() + ".default",
"defaultPackage." + settings.thisSystem.get()
};
} }
Strings SourceExprCommand::getDefaultFlakeAttrPathPrefixes() Strings SourceExprCommand::getDefaultFlakeAttrPathPrefixes()
@ -191,18 +195,21 @@ void SourceExprCommand::completeInstallable(std::string_view prefix)
auto sep = prefix_.rfind('.'); auto sep = prefix_.rfind('.');
std::string searchWord; std::string searchWord;
if (sep != std::string::npos) { if (sep != std::string::npos) {
searchWord = prefix_.substr(sep, std::string::npos); searchWord = prefix_.substr(sep + 1, std::string::npos);
prefix_ = prefix_.substr(0, sep); prefix_ = prefix_.substr(0, sep);
} else { } else {
searchWord = prefix_; searchWord = prefix_;
prefix_ = ""; prefix_ = "";
} }
Value &v1(*findAlongAttrPath(*state, prefix_, *autoArgs, root).first); auto [v, pos] = findAlongAttrPath(*state, prefix_, *autoArgs, root);
state->forceValue(v1); Value &v1(*v);
state->forceValue(v1, pos);
Value v2; Value v2;
state->autoCallFunction(*autoArgs, v1, v2); state->autoCallFunction(*autoArgs, v1, v2);
completionType = ctAttrs;
if (v2.type() == nAttrs) { if (v2.type() == nAttrs) {
for (auto & i : *v2.attrs) { for (auto & i : *v2.attrs) {
std::string name = i.name; std::string name = i.name;
@ -232,7 +239,9 @@ void completeFlakeRefWithFragment(
prefix. */ prefix. */
try { try {
auto hash = prefix.find('#'); auto hash = prefix.find('#');
if (hash != std::string::npos) { if (hash == std::string::npos) {
completeFlakeRef(evalState->store, prefix);
} else {
auto fragment = prefix.substr(hash + 1); auto fragment = prefix.substr(hash + 1);
auto flakeRefS = std::string(prefix.substr(0, hash)); auto flakeRefS = std::string(prefix.substr(0, hash));
// FIXME: do tilde expansion. // FIXME: do tilde expansion.
@ -248,6 +257,8 @@ void completeFlakeRefWithFragment(
flake. */ flake. */
attrPathPrefixes.push_back(""); attrPathPrefixes.push_back("");
completionType = ctAttrs;
for (auto & attrPathPrefixS : attrPathPrefixes) { for (auto & attrPathPrefixS : attrPathPrefixes) {
auto attrPathPrefix = parseAttrPath(*evalState, attrPathPrefixS); auto attrPathPrefix = parseAttrPath(*evalState, attrPathPrefixS);
auto attrPathS = attrPathPrefixS + std::string(fragment); auto attrPathS = attrPathPrefixS + std::string(fragment);
@ -262,9 +273,9 @@ void completeFlakeRefWithFragment(
auto attr = root->findAlongAttrPath(attrPath); auto attr = root->findAlongAttrPath(attrPath);
if (!attr) continue; if (!attr) continue;
for (auto & attr2 : attr->getAttrs()) { for (auto & attr2 : (*attr)->getAttrs()) {
if (hasPrefix(attr2, lastAttr)) { if (hasPrefix(attr2, lastAttr)) {
auto attrPath2 = attr->getAttrPath(attr2); auto attrPath2 = (*attr)->getAttrPath(attr2);
/* Strip the attrpath prefix. */ /* Strip the attrpath prefix. */
attrPath2.erase(attrPath2.begin(), attrPath2.begin() + attrPathPrefix.size()); attrPath2.erase(attrPath2.begin(), attrPath2.begin() + attrPathPrefix.size());
completions->add(flakeRefS + "#" + concatStringsSep(".", attrPath2)); completions->add(flakeRefS + "#" + concatStringsSep(".", attrPath2));
@ -285,12 +296,13 @@ void completeFlakeRefWithFragment(
} catch (Error & e) { } catch (Error & e) {
warn(e.msg()); warn(e.msg());
} }
completeFlakeRef(evalState->store, prefix);
} }
void completeFlakeRef(ref<Store> store, std::string_view prefix) void completeFlakeRef(ref<Store> store, std::string_view prefix)
{ {
if (!settings.isExperimentalFeatureEnabled(Xp::Flakes))
return;
if (prefix == "") if (prefix == "")
completions->add("."); completions->add(".");
@ -338,6 +350,18 @@ Installable::getCursor(EvalState & state)
return cursors[0]; return cursors[0];
} }
static StorePath getDeriver(
ref<Store> store,
const Installable & i,
const StorePath & drvPath)
{
auto derivers = store->queryValidDerivers(drvPath);
if (derivers.empty())
throw Error("'%s' does not have a known deriver", i.what());
// FIXME: use all derivers?
return *derivers.begin();
}
struct InstallableStorePath : Installable struct InstallableStorePath : Installable
{ {
ref<Store> store; ref<Store> store;
@ -346,7 +370,7 @@ struct InstallableStorePath : Installable
InstallableStorePath(ref<Store> store, StorePath && storePath) InstallableStorePath(ref<Store> store, StorePath && storePath)
: store(store), storePath(std::move(storePath)) { } : store(store), storePath(std::move(storePath)) { }
std::string what() override { return store->printStorePath(storePath); } std::string what() const override { return store->printStorePath(storePath); }
DerivedPaths toDerivedPaths() override DerivedPaths toDerivedPaths() override
{ {
@ -367,6 +391,15 @@ struct InstallableStorePath : Installable
} }
} }
StorePathSet toDrvPaths(ref<Store> store) override
{
if (storePath.isDerivation()) {
return {storePath};
} else {
return {getDeriver(store, *this, storePath)};
}
}
std::optional<StorePath> getStorePath() override std::optional<StorePath> getStorePath() override
{ {
return storePath; return storePath;
@ -395,6 +428,14 @@ DerivedPaths InstallableValue::toDerivedPaths()
return res; return res;
} }
StorePathSet InstallableValue::toDrvPaths(ref<Store> store)
{
StorePathSet res;
for (auto & drv : toDerivations())
res.insert(drv.drvPath);
return res;
}
struct InstallableAttrPath : InstallableValue struct InstallableAttrPath : InstallableValue
{ {
SourceExprCommand & cmd; SourceExprCommand & cmd;
@ -405,12 +446,12 @@ struct InstallableAttrPath : InstallableValue
: InstallableValue(state), cmd(cmd), v(allocRootValue(v)), attrPath(attrPath) : InstallableValue(state), cmd(cmd), v(allocRootValue(v)), attrPath(attrPath)
{ } { }
std::string what() override { return attrPath; } std::string what() const override { return attrPath; }
std::pair<Value *, Pos> toValue(EvalState & state) override std::pair<Value *, Pos> toValue(EvalState & state) override
{ {
auto [vRes, pos] = findAlongAttrPath(state, attrPath, *cmd.getAutoArgs(state), **v); auto [vRes, pos] = findAlongAttrPath(state, attrPath, *cmd.getAutoArgs(state), **v);
state.forceValue(*vRes); state.forceValue(*vRes, pos);
return {vRes, pos}; return {vRes, pos};
} }
@ -428,11 +469,10 @@ std::vector<InstallableValue::DerivationInfo> InstallableAttrPath::toDerivations
std::vector<DerivationInfo> res; std::vector<DerivationInfo> res;
for (auto & drvInfo : drvInfos) { for (auto & drvInfo : drvInfos) {
res.push_back({ auto drvPath = drvInfo.queryDrvPath();
state->store->parseStorePath(drvInfo.queryDrvPath()), if (!drvPath)
state->store->maybeParseStorePath(drvInfo.queryOutPath()), throw Error("'%s' is not a derivation", what());
drvInfo.queryOutputName() res.push_back({ *drvPath, drvInfo.queryOutputName() });
});
} }
return res; return res;
@ -460,7 +500,7 @@ Value * InstallableFlake::getFlakeOutputs(EvalState & state, const flake::Locked
auto aOutputs = vFlake->attrs->get(state.symbols.create("outputs")); auto aOutputs = vFlake->attrs->get(state.symbols.create("outputs"));
assert(aOutputs); assert(aOutputs);
state.forceValue(*aOutputs->value); state.forceValue(*aOutputs->value, [&]() { return aOutputs->value->determinePos(noPos); });
return aOutputs->value; return aOutputs->value;
} }
@ -485,7 +525,7 @@ ref<eval_cache::EvalCache> openEvalCache(
auto vFlake = state.allocValue(); auto vFlake = state.allocValue();
flake::callFlake(state, *lockedFlake, *vFlake); flake::callFlake(state, *lockedFlake, *vFlake);
state.forceAttrs(*vFlake); state.forceAttrs(*vFlake, noPos);
auto aOutputs = vFlake->attrs->get(state.symbols.create("outputs")); auto aOutputs = vFlake->attrs->get(state.symbols.create("outputs"));
assert(aOutputs); assert(aOutputs);
@ -508,13 +548,14 @@ InstallableFlake::InstallableFlake(
SourceExprCommand * cmd, SourceExprCommand * cmd,
ref<EvalState> state, ref<EvalState> state,
FlakeRef && flakeRef, FlakeRef && flakeRef,
Strings && attrPaths, std::string_view fragment,
Strings && prefixes, Strings attrPaths,
Strings prefixes,
const flake::LockFlags & lockFlags) const flake::LockFlags & lockFlags)
: InstallableValue(state), : InstallableValue(state),
flakeRef(flakeRef), flakeRef(flakeRef),
attrPaths(attrPaths), attrPaths(fragment == "" ? attrPaths : Strings{(std::string) fragment}),
prefixes(prefixes), prefixes(fragment == "" ? Strings{} : prefixes),
lockFlags(lockFlags) lockFlags(lockFlags)
{ {
if (cmd && cmd->getAutoArgs(*state)->size()) if (cmd && cmd->getAutoArgs(*state)->size())
@ -528,13 +569,22 @@ std::tuple<std::string, FlakeRef, InstallableValue::DerivationInfo> InstallableF
auto cache = openEvalCache(*state, lockedFlake); auto cache = openEvalCache(*state, lockedFlake);
auto root = cache->getRoot(); auto root = cache->getRoot();
Suggestions suggestions;
for (auto & attrPath : getActualAttrPaths()) { for (auto & attrPath : getActualAttrPaths()) {
auto attr = root->findAlongAttrPath( debug("trying flake output attribute '%s'", attrPath);
auto attrOrSuggestions = root->findAlongAttrPath(
parseAttrPath(*state, attrPath), parseAttrPath(*state, attrPath),
true true
); );
if (!attr) continue; if (!attrOrSuggestions) {
suggestions += attrOrSuggestions.getSuggestions();
continue;
}
auto attr = *attrOrSuggestions;
if (!attr->isDerivation()) if (!attr->isDerivation())
throw Error("flake output attribute '%s' is not a derivation", attrPath); throw Error("flake output attribute '%s' is not a derivation", attrPath);
@ -543,14 +593,13 @@ std::tuple<std::string, FlakeRef, InstallableValue::DerivationInfo> InstallableF
auto drvInfo = DerivationInfo { auto drvInfo = DerivationInfo {
std::move(drvPath), std::move(drvPath),
state->store->maybeParseStorePath(attr->getAttr(state->sOutPath)->getString()),
attr->getAttr(state->sOutputName)->getString() attr->getAttr(state->sOutputName)->getString()
}; };
return {attrPath, lockedFlake->flake.lockedRef, std::move(drvInfo)}; return {attrPath, lockedFlake->flake.lockedRef, std::move(drvInfo)};
} }
throw Error("flake '%s' does not provide attribute %s", throw Error(suggestions, "flake '%s' does not provide attribute %s",
flakeRef, showAttrPaths(getActualAttrPaths())); flakeRef, showAttrPaths(getActualAttrPaths()));
} }
@ -569,17 +618,24 @@ std::pair<Value *, Pos> InstallableFlake::toValue(EvalState & state)
auto emptyArgs = state.allocBindings(0); auto emptyArgs = state.allocBindings(0);
Suggestions suggestions;
for (auto & attrPath : getActualAttrPaths()) { for (auto & attrPath : getActualAttrPaths()) {
try { try {
auto [v, pos] = findAlongAttrPath(state, attrPath, *emptyArgs, *vOutputs); auto [v, pos] = findAlongAttrPath(state, attrPath, *emptyArgs, *vOutputs);
state.forceValue(*v); state.forceValue(*v, pos);
return {v, pos}; return {v, pos};
} catch (AttrPathNotFound & e) { } catch (AttrPathNotFound & e) {
suggestions += e.info().suggestions;
} }
} }
throw Error("flake '%s' does not provide attribute %s", throw Error(
flakeRef, showAttrPaths(getActualAttrPaths())); suggestions,
"flake '%s' does not provide attribute %s",
flakeRef,
showAttrPaths(getActualAttrPaths())
);
} }
std::vector<std::pair<std::shared_ptr<eval_cache::AttrCursor>, std::string>> std::vector<std::pair<std::shared_ptr<eval_cache::AttrCursor>, std::string>>
@ -594,7 +650,7 @@ InstallableFlake::getCursors(EvalState & state)
for (auto & attrPath : getActualAttrPaths()) { for (auto & attrPath : getActualAttrPaths()) {
auto attr = root->findAlongAttrPath(parseAttrPath(state, attrPath)); auto attr = root->findAlongAttrPath(parseAttrPath(state, attrPath));
if (attr) res.push_back({attr, attrPath}); if (attr) res.push_back({*attr, attrPath});
} }
return res; return res;
@ -671,7 +727,8 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
this, this,
getEvalState(), getEvalState(),
std::move(flakeRef), std::move(flakeRef),
fragment == "" ? getDefaultFlakeAttrPaths() : Strings{fragment}, fragment,
getDefaultFlakeAttrPaths(),
getDefaultFlakeAttrPathPrefixes(), getDefaultFlakeAttrPathPrefixes(),
lockFlags)); lockFlags));
continue; continue;
@ -713,8 +770,7 @@ BuiltPaths getBuiltPaths(ref<Store> evalStore, ref<Store> store, const DerivedPa
throw Error( throw Error(
"the derivation '%s' doesn't have an output named '%s'", "the derivation '%s' doesn't have an output named '%s'",
store->printStorePath(bfd.drvPath), output); store->printStorePath(bfd.drvPath), output);
if (settings.isExperimentalFeatureEnabled( if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
"ca-derivations")) {
auto outputId = auto outputId =
DrvOutput{outputHashes.at(output), output}; DrvOutput{outputHashes.at(output), output};
auto realisation = auto realisation =
@ -743,7 +799,7 @@ BuiltPaths getBuiltPaths(ref<Store> evalStore, ref<Store> store, const DerivedPa
return res; return res;
} }
BuiltPaths build( BuiltPaths Installable::build(
ref<Store> evalStore, ref<Store> evalStore,
ref<Store> store, ref<Store> store,
Realise mode, Realise mode,
@ -760,15 +816,36 @@ BuiltPaths build(
pathsToBuild.insert(pathsToBuild.end(), b.begin(), b.end()); pathsToBuild.insert(pathsToBuild.end(), b.begin(), b.end());
} }
if (mode == Realise::Nothing || mode == Realise::Derivation) switch (mode) {
case Realise::Nothing:
case Realise::Derivation:
printMissing(store, pathsToBuild, lvlError); printMissing(store, pathsToBuild, lvlError);
else if (mode == Realise::Outputs)
store->buildPaths(pathsToBuild, bMode, evalStore);
return getBuiltPaths(evalStore, store, pathsToBuild); return getBuiltPaths(evalStore, store, pathsToBuild);
case Realise::Outputs: {
BuiltPaths res;
for (auto & buildResult : store->buildPathsWithResults(pathsToBuild, bMode, evalStore)) {
if (!buildResult.success())
buildResult.rethrow();
std::visit(overloaded {
[&](const DerivedPath::Built & bfd) {
std::map<std::string, StorePath> outputs;
for (auto & path : buildResult.builtOutputs)
outputs.emplace(path.first.outputName, path.second.outPath);
res.push_back(BuiltPath::Built { bfd.drvPath, outputs });
},
[&](const DerivedPath::Opaque & bo) {
res.push_back(BuiltPath::Opaque { bo.path });
},
}, buildResult.path.raw());
}
return res;
}
default:
assert(false);
}
} }
BuiltPaths toBuiltPaths( BuiltPaths Installable::toBuiltPaths(
ref<Store> evalStore, ref<Store> evalStore,
ref<Store> store, ref<Store> store,
Realise mode, Realise mode,
@ -776,19 +853,19 @@ BuiltPaths toBuiltPaths(
const std::vector<std::shared_ptr<Installable>> & installables) const std::vector<std::shared_ptr<Installable>> & installables)
{ {
if (operateOn == OperateOn::Output) if (operateOn == OperateOn::Output)
return build(evalStore, store, mode, installables); return Installable::build(evalStore, store, mode, installables);
else { else {
if (mode == Realise::Nothing) if (mode == Realise::Nothing)
settings.readOnlyMode = true; settings.readOnlyMode = true;
BuiltPaths res; BuiltPaths res;
for (auto & drvPath : toDerivations(store, installables, true)) for (auto & drvPath : Installable::toDerivations(store, installables, true))
res.push_back(BuiltPath::Opaque{drvPath}); res.push_back(BuiltPath::Opaque{drvPath});
return res; return res;
} }
} }
StorePathSet toStorePaths( StorePathSet Installable::toStorePaths(
ref<Store> evalStore, ref<Store> evalStore,
ref<Store> store, ref<Store> store,
Realise mode, OperateOn operateOn, Realise mode, OperateOn operateOn,
@ -802,7 +879,7 @@ StorePathSet toStorePaths(
return outPaths; return outPaths;
} }
StorePath toStorePath( StorePath Installable::toStorePath(
ref<Store> evalStore, ref<Store> evalStore,
ref<Store> store, ref<Store> store,
Realise mode, OperateOn operateOn, Realise mode, OperateOn operateOn,
@ -816,7 +893,7 @@ StorePath toStorePath(
return *paths.begin(); return *paths.begin();
} }
StorePathSet toDerivations( StorePathSet Installable::toDerivations(
ref<Store> store, ref<Store> store,
const std::vector<std::shared_ptr<Installable>> & installables, const std::vector<std::shared_ptr<Installable>> & installables,
bool useDeriver) bool useDeriver)
@ -829,11 +906,7 @@ StorePathSet toDerivations(
[&](const DerivedPath::Opaque & bo) { [&](const DerivedPath::Opaque & bo) {
if (!useDeriver) if (!useDeriver)
throw Error("argument '%s' did not evaluate to a derivation", i->what()); throw Error("argument '%s' did not evaluate to a derivation", i->what());
auto derivers = store->queryValidDerivers(bo.path); drvPaths.insert(getDeriver(store, *i, bo.path));
if (derivers.empty())
throw Error("'%s' does not have a known deriver", i->what());
// FIXME: use all derivers?
drvPaths.insert(*derivers.begin());
}, },
[&](const DerivedPath::Built & bfd) { [&](const DerivedPath::Built & bfd) {
drvPaths.insert(bfd.drvPath); drvPaths.insert(bfd.drvPath);
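One user-visible effect of the getDefaultFlakeAttrPaths change earlier in this file: an installable given without a fragment now resolves packages.<system>.default before falling back to the legacy defaultPackage.<system>. Roughly, assuming flakes are enabled:

nix build .   # with no fragment, tries packages.<system>.default first,
              # then falls back to defaultPackage.<system>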

View file

@ -5,6 +5,7 @@
#include "path-with-outputs.hh" #include "path-with-outputs.hh"
#include "derived-path.hh" #include "derived-path.hh"
#include "eval.hh" #include "eval.hh"
#include "store-api.hh"
#include "flake/flake.hh" #include "flake/flake.hh"
#include <optional> #include <optional>
@ -29,14 +30,40 @@ struct UnresolvedApp
App resolve(ref<Store> evalStore, ref<Store> store); App resolve(ref<Store> evalStore, ref<Store> store);
}; };
enum class Realise {
/* Build the derivation. Postcondition: the
derivation outputs exist. */
Outputs,
/* Don't build the derivation. Postcondition: the store derivation
exists. */
Derivation,
/* Evaluate in dry-run mode. Postcondition: nothing. */
// FIXME: currently unused, but could be revived if we can
// evaluate derivations in-memory.
Nothing
};
/* How to handle derivations in commands that operate on store paths. */
enum class OperateOn {
/* Operate on the output path. */
Output,
/* Operate on the .drv path. */
Derivation
};
struct Installable struct Installable
{ {
virtual ~Installable() { } virtual ~Installable() { }
virtual std::string what() = 0; virtual std::string what() const = 0;
virtual DerivedPaths toDerivedPaths() = 0; virtual DerivedPaths toDerivedPaths() = 0;
virtual StorePathSet toDrvPaths(ref<Store> store)
{
throw Error("'%s' cannot be converted to a derivation path", what());
}
DerivedPath toDerivedPath(); DerivedPath toDerivedPath();
UnresolvedApp toApp(EvalState & state); UnresolvedApp toApp(EvalState & state);
@ -63,6 +90,39 @@ struct Installable
{ {
return FlakeRef::fromAttrs({{"type","indirect"}, {"id", "nixpkgs"}}); return FlakeRef::fromAttrs({{"type","indirect"}, {"id", "nixpkgs"}});
} }
static BuiltPaths build(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
const std::vector<std::shared_ptr<Installable>> & installables,
BuildMode bMode = bmNormal);
static std::set<StorePath> toStorePaths(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
OperateOn operateOn,
const std::vector<std::shared_ptr<Installable>> & installables);
static StorePath toStorePath(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
OperateOn operateOn,
std::shared_ptr<Installable> installable);
static std::set<StorePath> toDerivations(
ref<Store> store,
const std::vector<std::shared_ptr<Installable>> & installables,
bool useDeriver = false);
static BuiltPaths toBuiltPaths(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
OperateOn operateOn,
const std::vector<std::shared_ptr<Installable>> & installables);
}; };
struct InstallableValue : Installable struct InstallableValue : Installable
@ -74,13 +134,14 @@ struct InstallableValue : Installable
struct DerivationInfo struct DerivationInfo
{ {
StorePath drvPath; StorePath drvPath;
std::optional<StorePath> outPath;
std::string outputName; std::string outputName;
}; };
virtual std::vector<DerivationInfo> toDerivations() = 0; virtual std::vector<DerivationInfo> toDerivations() = 0;
DerivedPaths toDerivedPaths() override; DerivedPaths toDerivedPaths() override;
StorePathSet toDrvPaths(ref<Store> store) override;
}; };
struct InstallableFlake : InstallableValue struct InstallableFlake : InstallableValue
@ -95,11 +156,12 @@ struct InstallableFlake : InstallableValue
SourceExprCommand * cmd, SourceExprCommand * cmd,
ref<EvalState> state, ref<EvalState> state,
FlakeRef && flakeRef, FlakeRef && flakeRef,
Strings && attrPaths, std::string_view fragment,
Strings && prefixes, Strings attrPaths,
Strings prefixes,
const flake::LockFlags & lockFlags); const flake::LockFlags & lockFlags);
std::string what() override { return flakeRef.to_string() + "#" + *attrPaths.begin(); } std::string what() const override { return flakeRef.to_string() + "#" + *attrPaths.begin(); }
std::vector<std::string> getActualAttrPaths(); std::vector<std::string> getActualAttrPaths();
@ -123,4 +185,9 @@ ref<eval_cache::EvalCache> openEvalCache(
EvalState & state, EvalState & state,
std::shared_ptr<flake::LockedFlake> lockedFlake); std::shared_ptr<flake::LockedFlake> lockedFlake);
BuiltPaths getBuiltPaths(
ref<Store> evalStore,
ref<Store> store,
const DerivedPaths & hopefullyBuiltPaths);
} }
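The Realise and OperateOn enums moved into this header line up roughly with behaviour the CLI already exposes; an illustrative mapping, assuming the usual flakes-enabled commands (the exact flag-to-enum wiring is internal):

nix build nixpkgs#hello                    # realise the outputs (Realise::Outputs)
nix build --dry-run nixpkgs#hello          # evaluate and report what would be built, without building
nix path-info --derivation nixpkgs#hello   # operate on the .drv path (OperateOn::Derivation)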

View file

@ -8,7 +8,7 @@ libcmd_SOURCES := $(wildcard $(d)/*.cc)
libcmd_CXXFLAGS += -I src/libutil -I src/libstore -I src/libexpr -I src/libmain -I src/libfetchers libcmd_CXXFLAGS += -I src/libutil -I src/libstore -I src/libexpr -I src/libmain -I src/libfetchers
libcmd_LDFLAGS += -llowdown -pthread libcmd_LDFLAGS += $(LOWDOWN_LIBS) -pthread
libcmd_LIBS = libstore libutil libexpr libmain libfetchers libcmd_LIBS = libstore libutil libexpr libmain libfetchers

View file

@ -9,7 +9,7 @@ namespace nix {
static Strings parseAttrPath(std::string_view s) static Strings parseAttrPath(std::string_view s)
{ {
Strings res; Strings res;
string cur; std::string cur;
auto i = s.begin(); auto i = s.begin();
while (i != s.end()) { while (i != s.end()) {
if (*i == '.') { if (*i == '.') {
@ -41,7 +41,7 @@ std::vector<Symbol> parseAttrPath(EvalState & state, std::string_view s)
} }
std::pair<Value *, Pos> findAlongAttrPath(EvalState & state, const string & attrPath, std::pair<Value *, Pos> findAlongAttrPath(EvalState & state, const std::string & attrPath,
Bindings & autoArgs, Value & vIn) Bindings & autoArgs, Value & vIn)
{ {
Strings tokens = parseAttrPath(attrPath); Strings tokens = parseAttrPath(attrPath);
@ -58,7 +58,7 @@ std::pair<Value *, Pos> findAlongAttrPath(EvalState & state, const string & attr
Value * vNew = state.allocValue(); Value * vNew = state.allocValue();
state.autoCallFunction(autoArgs, *v, *vNew); state.autoCallFunction(autoArgs, *v, *vNew);
v = vNew; v = vNew;
state.forceValue(*v); state.forceValue(*v, noPos);
/* It should evaluate to either a set or an expression, /* It should evaluate to either a set or an expression,
according to what is specified in the attrPath. */ according to what is specified in the attrPath. */
@ -74,8 +74,14 @@ std::pair<Value *, Pos> findAlongAttrPath(EvalState & state, const string & attr
throw Error("empty attribute name in selection path '%1%'", attrPath); throw Error("empty attribute name in selection path '%1%'", attrPath);
Bindings::iterator a = v->attrs->find(state.symbols.create(attr)); Bindings::iterator a = v->attrs->find(state.symbols.create(attr));
if (a == v->attrs->end()) if (a == v->attrs->end()) {
throw AttrPathNotFound("attribute '%1%' in selection path '%2%' not found", attr, attrPath); std::set<std::string> attrNames;
for (auto & attr : *v->attrs)
attrNames.insert(attr.name);
auto suggestions = Suggestions::bestMatches(attrNames, attr);
throw AttrPathNotFound(suggestions, "attribute '%1%' in selection path '%2%' not found", attr, attrPath);
}
v = &*a->value; v = &*a->value;
pos = *a->pos; pos = *a->pos;
} }
@ -121,7 +127,7 @@ Pos findPackageFilename(EvalState & state, Value & v, std::string what)
std::string filename(pos, 0, colon); std::string filename(pos, 0, colon);
unsigned int lineno; unsigned int lineno;
try { try {
lineno = std::stoi(std::string(pos, colon + 1)); lineno = std::stoi(std::string(pos, colon + 1, std::string::npos));
} catch (std::invalid_argument & e) { } catch (std::invalid_argument & e) {
throw ParseError("cannot parse line number '%s'", pos); throw ParseError("cannot parse line number '%s'", pos);
} }

View file

@ -10,8 +10,11 @@ namespace nix {
MakeError(AttrPathNotFound, Error); MakeError(AttrPathNotFound, Error);
MakeError(NoPositionInfo, Error); MakeError(NoPositionInfo, Error);
std::pair<Value *, Pos> findAlongAttrPath(EvalState & state, const string & attrPath, std::pair<Value *, Pos> findAlongAttrPath(
Bindings & autoArgs, Value & vIn); EvalState & state,
const std::string & attrPath,
Bindings & autoArgs,
Value & vIn);
/* Heuristic to find the filename and lineno or a nix value. */ /* Heuristic to find the filename and lineno or a nix value. */
Pos findPackageFilename(EvalState & state, Value & v, std::string what); Pos findPackageFilename(EvalState & state, Value & v, std::string what);

Some files were not shown because too many files have changed in this diff