Merge branch 'errors-phase-2' into caveman-LOCs

commit b1c53b034c
@@ -386,7 +386,7 @@ false</literal>.</para>

 <programlisting>
 builtins.fetchurl {
-  url = https://example.org/foo-1.2.3.tar.xz;
+  url = "https://example.org/foo-1.2.3.tar.xz";
   sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae";
 }
 </programlisting>
@@ -53,7 +53,7 @@ nixpkgs=/home/eelco/Dev/nixpkgs-branch:/etc/nixos</screen>
 <envar>NIX_PATH</envar> to

 <screen>
-nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/nixos-15.09.tar.gz</screen>
+nixpkgs=https://github.com/NixOS/nixpkgs/archive/nixos-15.09.tar.gz</screen>

 tells Nix to download the latest revision in the Nixpkgs/NixOS
 15.09 channel.</para>
@@ -526,13 +526,10 @@ these paths will be fetched (0.04 MiB download, 0.19 MiB unpacked):
 14.12 channel:

 <screen>
-$ nix-env -f https://github.com/NixOS/nixpkgs-channels/archive/nixos-14.12.tar.gz -iA firefox
+$ nix-env -f https://github.com/NixOS/nixpkgs/archive/nixos-14.12.tar.gz -iA firefox
 </screen>
-
-(The GitHub repository <literal>nixpkgs-channels</literal> is updated
-automatically from the main <literal>nixpkgs</literal> repository
-after certain tests have succeeded and binaries have been built and
-uploaded to the binary cache at <uri>cache.nixos.org</uri>.)</para>
+</para>

 </refsection>
@@ -258,7 +258,7 @@ path. You can override it by passing <option>-I</option> or setting
 containing the Pan package from a specific revision of Nixpkgs:

 <screen>
-$ nix-shell -p pan -I nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/8a3eea054838b55aca962c3fbde9c83c102b8bf2.tar.gz
+$ nix-shell -p pan -I nixpkgs=https://github.com/NixOS/nixpkgs/archive/8a3eea054838b55aca962c3fbde9c83c102b8bf2.tar.gz

 [nix-shell:~]$ pan --version
 Pan 0.139
@@ -352,7 +352,7 @@ following Haskell script uses a specific branch of Nixpkgs/NixOS (the
 <programlisting><![CDATA[
 #! /usr/bin/env nix-shell
 #! nix-shell -i runghc -p "haskellPackages.ghcWithPackages (ps: [ps.HTTP ps.tagsoup])"
-#! nix-shell -I nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/nixos-18.03.tar.gz
+#! nix-shell -I nixpkgs=https://github.com/NixOS/nixpkgs/archive/nixos-18.03.tar.gz

 import Network.HTTP
 import Text.HTML.TagSoup
@@ -370,7 +370,7 @@ If you want to be even more precise, you can specify a specific
 revision of Nixpkgs:

 <programlisting>
-#! nix-shell -I nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/0672315759b3e15e2121365f067c1c8c56bb4722.tar.gz
+#! nix-shell -I nixpkgs=https://github.com/NixOS/nixpkgs/archive/0672315759b3e15e2121365f067c1c8c56bb4722.tar.gz
 </programlisting>

 </para>
@@ -178,7 +178,7 @@ impureEnvVars = [ "http_proxy" "https_proxy" <replaceable>...</replaceable> ];

 <programlisting>
 fetchurl {
-  url = http://ftp.gnu.org/pub/gnu/hello/hello-2.1.1.tar.gz;
+  url = "http://ftp.gnu.org/pub/gnu/hello/hello-2.1.1.tar.gz";
   sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465";
 }
 </programlisting>
@@ -189,7 +189,7 @@ fetchurl {

 <programlisting>
 fetchurl {
-  url = ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz;
+  url = "ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz";
   sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465";
 }
 </programlisting>
@@ -324,7 +324,7 @@ if builtins ? getEnv then builtins.getEnv "PATH" else ""</programlisting>
 particular version of Nixpkgs, e.g.

 <programlisting>
-with import (fetchTarball https://github.com/NixOS/nixpkgs-channels/archive/nixos-14.12.tar.gz) {};
+with import (fetchTarball https://github.com/NixOS/nixpkgs/archive/nixos-14.12.tar.gz) {};

 stdenv.mkDerivation { … }
 </programlisting>
@@ -349,7 +349,7 @@ stdenv.mkDerivation { … }

 <programlisting>
 with import (fetchTarball {
-  url = https://github.com/NixOS/nixpkgs-channels/archive/nixos-14.12.tar.gz;
+  url = "https://github.com/NixOS/nixpkgs/archive/nixos-14.12.tar.gz";
   sha256 = "1jppksrfvbk5ypiqdz4cddxdl8z6zyzdb2srq8fcffr327ld5jj2";
 }) {};
@@ -1406,7 +1406,7 @@ stdenv.mkDerivation {
   ";

   src = fetchurl {
-    url = http://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz;
+    url = "http://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz";
     sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465";
   };
   inherit perl;
@@ -15,7 +15,7 @@ stdenv.mkDerivation { <co xml:id='ex-hello-nix-co-2' />
   name = "hello-2.1.1"; <co xml:id='ex-hello-nix-co-3' />
   builder = ./builder.sh; <co xml:id='ex-hello-nix-co-4' />
   src = fetchurl { <co xml:id='ex-hello-nix-co-5' />
-    url = ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz;
+    url = "ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz";
     sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465";
   };
   inherit perl; <co xml:id='ex-hello-nix-co-6' />
@@ -73,12 +73,4 @@ waiting for lock on `/nix/store/0h5b7hp8d4hqfrw8igvx97x1xawrjnac-hello-2.1.1x'</
 So it is always safe to run multiple instances of Nix in parallel
 (which isn’t the case with, say, <command>make</command>).</para>
-
-<para>If you have a system with multiple CPUs, you may want to have
-Nix build different derivations in parallel (insofar as possible).
-Just pass the option <link linkend='opt-max-jobs'><option>-j
-<replaceable>N</replaceable></option></link>, where
-<replaceable>N</replaceable> is the maximum number of jobs to be run
-in parallel, or set. Typically this should be the number of
-CPUs.</para>

 </section>
@@ -6,16 +6,30 @@

 <title>Installing a Binary Distribution</title>

-<para>If you are using Linux or macOS, the easiest way to install Nix
-is to run the following command:
+<para>
+If you are using Linux or macOS versions up to 10.14 (Mojave), the
+easiest way to install Nix is to run the following command:
+</para>

 <screen>
 $ sh <(curl https://nixos.org/nix/install)
 </screen>

+<para>
+If you're using macOS 10.15 (Catalina) or newer, consult
+<link linkend="sect-macos-installation">the macOS installation instructions</link>
+before installing.
+</para>
+
 <para>
 As of Nix 2.1.0, the Nix installer will always default to creating a
 single-user installation, however opting in to the multi-user
 installation is highly recommended.
+<!-- TODO: this explains *neither* why the default version is
+single-user, nor why we'd recommend multi-user over the default.
+True prospective users don't have much basis for evaluating this.
+What's it to me? Who should pick which? Why? What if I pick wrong?
+-->
 </para>

 <section xml:id="sect-single-user-installation">
@@ -36,7 +50,7 @@ run this under your usual user account, <emphasis>not</emphasis> as
 root. The script will invoke <command>sudo</command> to create
 <filename>/nix</filename> if it doesn’t already exist. If you don’t
 have <command>sudo</command>, you should manually create
-<command>/nix</command> first as root, e.g.:
+<filename>/nix</filename> first as root, e.g.:

 <screen>
 $ mkdir /nix
@@ -47,7 +61,7 @@ The install script will modify the first writable file from amongst
 <filename>.bash_profile</filename>, <filename>.bash_login</filename>
 and <filename>.profile</filename> to source
 <filename>~/.nix-profile/etc/profile.d/nix.sh</filename>. You can set
-the <command>NIX_INSTALLER_NO_MODIFY_PROFILE</command> environment
+the <envar>NIX_INSTALLER_NO_MODIFY_PROFILE</envar> environment
 variable before executing the install script to disable this
 behaviour.
 </para>
@@ -81,12 +95,10 @@ $ rm -rf /nix
 <para>
 You can instruct the installer to perform a multi-user
 installation on your system:
-
-<screen>
-sh <(curl https://nixos.org/nix/install) --daemon
-</screen>
 </para>

+<screen>sh <(curl https://nixos.org/nix/install) --daemon</screen>
+
 <para>
 The multi-user installation of Nix will create build users between
 the user IDs 30001 and 30032, and a group with the group ID 30000.
@@ -136,6 +148,273 @@ sudo rm /Library/LaunchDaemons/org.nixos.nix-daemon.plist

 </section>

+<section xml:id="sect-macos-installation">
+<title>macOS Installation</title>
+
+<para>
+Starting with macOS 10.15 (Catalina), the root filesystem is read-only.
+This means <filename>/nix</filename> can no longer live on your system
+volume, and that you'll need a workaround to install Nix.
+</para>
+
+<para>
+The recommended approach, which creates an unencrypted APFS volume
+for your Nix store and a "synthetic" empty directory to mount it
+over at <filename>/nix</filename>, is least likely to impair Nix
+or your system.
+</para>
+
+<note><para>
+With all separate-volume approaches, it's possible something on
+your system (particularly daemons/services and restored apps) may
+need access to your Nix store before the volume is mounted. Adding
+additional encryption makes this more likely.
+</para></note>
+
+<para>
+If you're using a recent Mac with a
+<link xlink:href="https://www.apple.com/euro/mac/shared/docs/Apple_T2_Security_Chip_Overview.pdf">T2 chip</link>,
+your drive will still be encrypted at rest (in which case "unencrypted"
+is a bit of a misnomer). To use this approach, just install Nix with:
+</para>
+
+<screen>$ sh <(curl https://nixos.org/nix/install) --darwin-use-unencrypted-nix-store-volume</screen>
+
+<para>
+If you don't like the sound of this, you'll want to weigh the
+other approaches and tradeoffs detailed in this section.
+</para>
+<note>
+<title>Eventual solutions?</title>
+<para>
+All of the known workarounds have drawbacks, but we hope
+better solutions will be available in the future. Some that
+we have our eye on are:
+</para>
+<orderedlist>
+<listitem>
+<para>
+A true firmlink would enable the Nix store to live on the
+primary data volume without the build problems caused by
+the symlink approach. End users cannot currently
+create true firmlinks.
+</para>
+</listitem>
+<listitem>
+<para>
+If the Nix store volume shared FileVault encryption
+with the primary data volume (probably by using the same
+volume group and role), FileVault encryption could be
+easily supported by the installer without requiring
+manual setup by each user.
+</para>
+</listitem>
+</orderedlist>
+</note>
+
+<section xml:id="sect-macos-installation-change-store-prefix">
+<title>Change the Nix store path prefix</title>
+<para>
+Changing the default prefix for the Nix store is a simple
+approach which enables you to leave it on your root volume,
+where it can take full advantage of FileVault encryption if
+enabled. Unfortunately, this approach also opts your device out
+of some benefits that are enabled by using the same prefix
+across systems:
+
+<itemizedlist>
+<listitem>
+<para>
+Your system won't be able to take advantage of the binary
+cache (unless someone is able to stand up and support
+duplicate caching infrastructure), which means you'll
+spend more time waiting for builds.
+</para>
+</listitem>
+<listitem>
+<para>
+It's harder to build and deploy packages to Linux systems.
+</para>
+</listitem>
+<!-- TODO: may be more here -->
+</itemizedlist>
+
+<!-- TODO: Yes, but how?! -->
+
+It would also be possible (and often requested) to just apply this
+change ecosystem-wide, but it's an intrusive process that has
+side effects we want to avoid for now.
+<!-- magnificent hand-wavy gesture -->
+</para>
+<para>
+</para>
+</section>
+<section xml:id="sect-macos-installation-encrypted-volume">
+<title>Use a separate encrypted volume</title>
+<para>
+If you like, you can also add encryption to the recommended
+approach taken by the installer. You can do this by pre-creating
+an encrypted volume before you run the installer--or you can
+run the installer and encrypt the volume it creates later.
+<!-- TODO: see later note about whether this needs both add-encryption and from-scratch directions -->
+</para>
+<para>
+In either case, adding encryption to a second volume isn't quite
+as simple as enabling FileVault for your boot volume. Before you
+dive in, there are a few things to weigh:
+</para>
+<orderedlist>
+<listitem>
+<para>
+The additional volume won't be encrypted with your existing
+FileVault key, so you'll need another mechanism to decrypt
+the volume.
+</para>
+</listitem>
+<listitem>
+<para>
+You can store the password in Keychain to automatically
+decrypt the volume on boot--but it'll have to wait on Keychain
+and may not mount before your GUI apps restore. If any of
+your launchd agents or apps depend on Nix-installed software
+(for example, if you use a Nix-installed login shell), the
+restore may fail or break.
+</para>
+<para>
+On a case-by-case basis, you may be able to work around this
+problem by using <command>wait4path</command> to block
+execution until your executable is available.
+</para>
+<para>
+It's also possible to decrypt and mount the volume earlier
+with a login hook--but this mechanism appears to be
+deprecated and its future is unclear.
+</para>
+</listitem>
+<listitem>
+<para>
+You can hard-code the password in the clear, so that your
+store volume can be decrypted before Keychain is available.
+</para>
+</listitem>
+</orderedlist>
+<para>
+If you are comfortable navigating these tradeoffs, you can encrypt the volume with
+something along the lines of:
+<!-- TODO:
+I don't know if this also needs from-scratch instructions?
+can we just recommend use-the-installer-and-then-encrypt?
+-->
+</para>
+<!--
+TODO: it looks like this option can be encryptVolume|encrypt|enableFileVault
+
+It may be more clear to use encryptVolume, here? FileVault seems
+heavily associated with the boot-volume behavior; I worry
+a little that it can mislead here, especially as it gets
+copied around minus doc context...?
+-->
+<screen>alice$ diskutil apfs enableFileVault /nix -user disk</screen>
+
+<!-- TODO: and then go into detail on the mount/decrypt approaches? -->
+</section>
+
+<section xml:id="sect-macos-installation-symlink">
+<!--
+Maybe a good razor is: if we'd hate having to support someone who
+installed Nix this way, it shouldn't even be detailed?
+-->
+<title>Symlink the Nix store to a custom location</title>
+<para>
+Another simple approach is using <filename>/etc/synthetic.conf</filename>
+to symlink the Nix store to the data volume. This option also
+enables your store to share any configured FileVault encryption.
+Unfortunately, builds that resolve the symlink may leak the
+canonical path or even fail.
+</para>
+<para>
+Because of these downsides, we can't recommend this approach.
+</para>
+<!-- Leaving out instructions for this one. -->
+</section>
+<section xml:id="sect-macos-installation-recommended-notes">
+<title>Notes on the recommended approach</title>
+<para>
+This section goes into a little more detail on the recommended
+approach. You don't need to understand it to run the installer,
+but it can serve as a helpful reference if you run into trouble.
+</para>
+<orderedlist>
+<listitem>
+<para>
+In order to compose user-writable locations into the new
+read-only system root, Apple introduced a new concept called
+<literal>firmlinks</literal>, which it describes as a
+"bi-directional wormhole" between two filesystems. You can
+see the current firmlinks in <filename>/usr/share/firmlinks</filename>.
+Unfortunately, firmlinks aren't (currently?) user-configurable.
+</para>
+
+<para>
+For special cases like NFS mount points or package manager roots,
+<link xlink:href="https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man5/synthetic.conf.5.html">synthetic.conf(5)</link>
+supports limited user-controlled file-creation (of symlinks,
+and synthetic empty directories) at <filename>/</filename>.
+To create a synthetic empty directory for mounting at <filename>/nix</filename>,
+add the following line to <filename>/etc/synthetic.conf</filename>
+(create it if necessary):
+</para>
+
+<screen>nix</screen>
+</listitem>
+
+<listitem>
+<para>
+This configuration is applied at boot time, but you can use
+<command>apfs.util</command> to trigger creation (not deletion)
+of new entries without a reboot:
+</para>
+
+<screen>alice$ /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -B</screen>
+</listitem>
+
+<listitem>
+<para>
+Create the new APFS volume with diskutil:
+</para>
+
+<screen>alice$ sudo diskutil apfs addVolume diskX APFS 'Nix Store' -mountpoint /nix</screen>
+</listitem>
+
+<listitem>
+<para>
+Using <command>vifs</command>, add the new mount to
+<filename>/etc/fstab</filename>. If it doesn't already have
+other entries, it should look something like:
+</para>
+
+<screen>
+#
+# Warning - this file should only be modified with vifs(8)
+#
+# Failure to do so is unsupported and may be destructive.
+#
+LABEL=Nix\040Store /nix apfs rw,nobrowse
+</screen>
+
+<para>
+The nobrowse setting will keep Spotlight from indexing this
+volume, and keep it from showing up on your desktop.
+</para>
+</listitem>
+</orderedlist>
+</section>
+
+</section>
+
 <section xml:id="sect-nix-install-pinned-version-url">
 <title>Installing a pinned Nix version from a URL</title>
@@ -274,7 +274,8 @@ int checkSignature(SV * publicKey_, SV * sig_, char * msg)
 SV * addToStore(char * srcPath, int recursive, char * algo)
     PPCODE:
         try {
-            auto path = store()->addToStore(std::string(baseNameOf(srcPath)), srcPath, recursive, parseHashType(algo));
+            auto method = recursive ? FileIngestionMethod::Recursive : FileIngestionMethod::Flat;
+            auto path = store()->addToStore(std::string(baseNameOf(srcPath)), srcPath, method, parseHashType(algo));
             XPUSHs(sv_2mortal(newSVpv(store()->printStorePath(path).c_str(), 0)));
         } catch (Error & e) {
             croak("%s", e.what());
@@ -285,7 +286,8 @@ SV * makeFixedOutputPath(int recursive, char * algo, char * hash, char * name)
     PPCODE:
         try {
            Hash h(hash, parseHashType(algo));
-            auto path = store()->makeFixedOutputPath(recursive, h, name);
+            auto method = recursive ? FileIngestionMethod::Recursive : FileIngestionMethod::Flat;
+            auto path = store()->makeFixedOutputPath(method, h, name);
             XPUSHs(sv_2mortal(newSVpv(store()->printStorePath(path).c_str(), 0)));
         } catch (Error & e) {
             croak("%s", e.what());
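These two bindings show the pattern used throughout this merge: the old boolean `recursive` flag is converted to a named `FileIngestionMethod` before calling into the store API. The enum's definition lives in the Nix headers rather than in this diff; a minimal sketch of what the new call sites assume:

// Sketch (assumption): the enum replacing the old `bool recursive` flag.
// Backing it with bool keeps conversion from existing boolean call sites
// trivial: false <-> Flat, true <-> Recursive.
enum struct FileIngestionMethod : bool {
    Flat = false,       // hash the file contents as-is
    Recursive = true,   // hash the NAR serialisation of the path
};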
release.nix (22 lines changed)
@@ -115,17 +115,17 @@ let

     installFlags = "sysconfdir=$(out)/etc";

-    postInstall = ''
-      mkdir -p $doc/nix-support
-      echo "doc manual $doc/share/doc/nix/manual" >> $doc/nix-support/hydra-build-products
-    '';
-
     doCheck = true;

     doInstallCheck = true;
     installCheckFlags = "sysconfdir=$(out)/etc";

     separateDebugInfo = true;

+    preDist = ''
+      mkdir -p $doc/nix-support
+      echo "doc manual $doc/share/doc/nix/manual" >> $doc/nix-support/hydra-build-products
+    '';
+
   });
@@ -177,10 +177,10 @@ let
     }
     ''
       cp ${installerClosureInfo}/registration $TMPDIR/reginfo
+      cp ${./scripts/create-darwin-volume.sh} $TMPDIR/create-darwin-volume.sh
       substitute ${./scripts/install-nix-from-closure.sh} $TMPDIR/install \
         --subst-var-by nix ${toplevel} \
         --subst-var-by cacert ${cacert}

       substitute ${./scripts/install-darwin-multi-user.sh} $TMPDIR/install-darwin-multi-user.sh \
         --subst-var-by nix ${toplevel} \
         --subst-var-by cacert ${cacert}
@@ -195,6 +195,7 @@ let
       # SC1090: Don't worry about not being able to find
       # $nix/etc/profile.d/nix.sh
       shellcheck --exclude SC1090 $TMPDIR/install
+      shellcheck $TMPDIR/create-darwin-volume.sh
       shellcheck $TMPDIR/install-darwin-multi-user.sh
       shellcheck $TMPDIR/install-systemd-multi-user.sh
@@ -210,6 +211,7 @@ let
       fi

       chmod +x $TMPDIR/install
+      chmod +x $TMPDIR/create-darwin-volume.sh
       chmod +x $TMPDIR/install-darwin-multi-user.sh
       chmod +x $TMPDIR/install-systemd-multi-user.sh
       chmod +x $TMPDIR/install-multi-user
@@ -222,11 +224,15 @@ let
         --absolute-names \
         --hard-dereference \
         --transform "s,$TMPDIR/install,$dir/install," \
+        --transform "s,$TMPDIR/create-darwin-volume.sh,$dir/create-darwin-volume.sh," \
         --transform "s,$TMPDIR/reginfo,$dir/.reginfo," \
         --transform "s,$NIX_STORE,$dir/store,S" \
-        $TMPDIR/install $TMPDIR/install-darwin-multi-user.sh \
+        $TMPDIR/install \
+        $TMPDIR/create-darwin-volume.sh \
+        $TMPDIR/install-darwin-multi-user.sh \
         $TMPDIR/install-systemd-multi-user.sh \
-        $TMPDIR/install-multi-user $TMPDIR/reginfo \
+        $TMPDIR/install-multi-user \
+        $TMPDIR/reginfo \
         $(cat ${installerClosureInfo}/store-paths)
     '');
scripts/create-darwin-volume.sh (new executable file, 185 lines)
@@ -0,0 +1,185 @@
+#!/bin/sh
+set -e
+
+root_disk() {
+    diskutil info -plist /
+}
+
+apfs_volumes_for() {
+    disk=$1
+    diskutil apfs list -plist "$disk"
+}
+
+disk_identifier() {
+    xpath "/plist/dict/key[text()='ParentWholeDisk']/following-sibling::string[1]/text()" 2>/dev/null
+}
+
+volume_list_true() {
+    key=$1
+    xpath "/plist/dict/array/dict/key[text()='Volumes']/following-sibling::array/dict/key[text()='$key']/following-sibling::true[1]" 2> /dev/null
+}
+
+volume_get_string() {
+    key=$1 i=$2
+    xpath "/plist/dict/array/dict/key[text()='Volumes']/following-sibling::array/dict[$i]/key[text()='$key']/following-sibling::string[1]/text()" 2> /dev/null
+}
+
+find_nix_volume() {
+    disk=$1
+    i=1
+    volumes=$(apfs_volumes_for "$disk")
+    while true; do
+        name=$(echo "$volumes" | volume_get_string "Name" "$i")
+        if [ -z "$name" ]; then
+            break
+        fi
+        case "$name" in
+            [Nn]ix*)
+                echo "$name"
+                break
+                ;;
+        esac
+        i=$((i+1))
+    done
+}
+
+test_fstab() {
+    grep -q "/nix apfs rw" /etc/fstab 2>/dev/null
+}
+
+test_nix_symlink() {
+    [ -L "/nix" ] || grep -q "^nix." /etc/synthetic.conf 2>/dev/null
+}
+
+test_synthetic_conf() {
+    grep -q "^nix$" /etc/synthetic.conf 2>/dev/null
+}
+
+test_nix() {
+    test -d "/nix"
+}
+test_t2_chip_present(){
+    # Use xartutil to see if system has a t2 chip.
+    #
+    # This isn't well-documented on its own; until it is,
+    # let's keep track of knowledge/assumptions.
+    #
+    # Warnings:
+    # - Don't search "xart" if porn will cause you trouble :)
+    # - Other xartutil flags do dangerous things. Don't run them
+    #   naively. If you must, search "xartutil" first.
+    #
+    # Assumptions:
+    # - the "xART session seeds recovery utility"
+    #   appears to interact with xartstorageremoted
+    # - `sudo xartutil --list` lists xART sessions
+    #   and their seeds and exits 0 if successful. If
+    #   not, it exits 1 and prints an error such as:
+    #   xartutil: ERROR: No supported link to the SEP present
+    # - xART sessions/seeds are present when a T2 chip is
+    #   (and not, otherwise)
+    # - the presence of a T2 chip means a newly-created
+    #   volume on the primary drive will be
+    #   encrypted at rest
+    # - all together: `sudo xartutil --list`
+    #   should exit 0 if a new Nix Store volume will
+    #   be encrypted at rest, and exit 1 if not.
+    sudo xartutil --list >/dev/null 2>/dev/null
+}
+
+test_filevault_in_use() {
+    disk=$1
+    # list vols on disk | get value of Filevault key | value is true
+    apfs_volumes_for "$disk" | volume_list_true FileVault | grep -q true
+}
+
+# use after error msg for conditions we don't understand
+suggest_report_error(){
+    # ex "error: something sad happened :(" >&2
+    echo "       please report this @ https://github.com/nixos/nix/issues" >&2
+}
+main() {
+    (
+        echo ""
+        echo "     ------------------------------------------------------------------ "
+        echo "    | This installer will create a volume for the nix store and        |"
+        echo "    | configure it to mount at /nix.  Follow these steps to uninstall. |"
+        echo "     ------------------------------------------------------------------ "
+        echo ""
+        echo "  1. Remove the entry from fstab using 'sudo vifs'"
+        echo "  2. Destroy the data volume using 'diskutil apfs deleteVolume'"
+        echo "  3. Remove the 'nix' line from /etc/synthetic.conf or the file"
+        echo ""
+    ) >&2
+
+    if test_nix_symlink; then
+        echo "error: /nix is a symlink, please remove it and make sure it's not in synthetic.conf (in which case a reboot is required)" >&2
+        echo "  /nix -> $(readlink "/nix")" >&2
+        exit 2
+    fi
+
+    if ! test_synthetic_conf; then
+        echo "Configuring /etc/synthetic.conf..." >&2
+        echo nix | sudo tee -a /etc/synthetic.conf
+        if ! test_synthetic_conf; then
+            echo "error: failed to configure synthetic.conf;" >&2
+            suggest_report_error
+            exit 1
+        fi
+    fi
+
+    if ! test_nix; then
+        echo "Creating mountpoint for /nix..." >&2
+        /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -B || true
+        if ! test_nix; then
+            sudo mkdir -p /nix 2>/dev/null || true
+        fi
+        if ! test_nix; then
+            echo "error: failed to bootstrap /nix; if a reboot doesn't help," >&2
+            suggest_report_error
+            exit 1
+        fi
+    fi
+
+    disk=$(root_disk | disk_identifier)
+    volume=$(find_nix_volume "$disk")
+    if [ -z "$volume" ]; then
+        echo "Creating a Nix Store volume..." >&2
+
+        if test_filevault_in_use "$disk"; then
+            # TODO: Not sure if it's in-scope now, but `diskutil apfs list`
+            # shows both filevault and encrypted at rest status, and it
+            # may be the more semantic way to test for this? It'll show
+            # `FileVault:                 No (Encrypted at rest)`
+            # `FileVault:                 No`
+            # `FileVault:                 Yes (Unlocked)`
+            # and so on.
+            if test_t2_chip_present; then
+                echo "warning: boot volume is FileVault-encrypted, but the Nix store volume" >&2
+                echo "         is only encrypted at rest." >&2
+                echo "         See https://nixos.org/nix/manual/#sect-macos-installation" >&2
+            else
+                echo "error: refusing to create Nix store volume because the boot volume is" >&2
+                echo "       FileVault encrypted, but encryption-at-rest is not available." >&2
+                echo "       Manually create a volume for the store and re-run this script." >&2
+                echo "       See https://nixos.org/nix/manual/#sect-macos-installation" >&2
+                exit 1
+            fi
+        fi
+
+        sudo diskutil apfs addVolume "$disk" APFS 'Nix Store' -mountpoint /nix
+        volume="Nix Store"
+    else
+        echo "Using existing '$volume' volume" >&2
+    fi
+
+    if ! test_fstab; then
+        echo "Configuring /etc/fstab..." >&2
+        label=$(echo "$volume" | sed 's/ /\\040/g')
+        printf "\$a\nLABEL=%s /nix apfs rw,nobrowse\n.\nwq\n" "$label" | EDITOR=ed sudo vifs
+    fi
+}
+
+main "$@"
@@ -20,15 +20,18 @@ readonly GREEN='\033[32m'
 readonly GREEN_UL='\033[4;32m'
 readonly RED='\033[31m'

-readonly NIX_USER_COUNT="32"
+# installer allows overriding build user count to speed up installation
+# as creating each user takes non-trivial amount of time on macos
+readonly NIX_USER_COUNT=${NIX_USER_COUNT:-32}
 readonly NIX_BUILD_GROUP_ID="30000"
 readonly NIX_BUILD_GROUP_NAME="nixbld"
 readonly NIX_FIRST_BUILD_UID="30001"
 # Please don't change this. We don't support it, because the
 # default shell profile that comes with Nix doesn't support it.
 readonly NIX_ROOT="/nix"
+readonly NIX_EXTRA_CONF=${NIX_EXTRA_CONF:-}

-readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshrc")
+readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshenv")
 readonly PROFILE_BACKUP_SUFFIX=".backup-before-nix"
 readonly PROFILE_NIX_FILE="$NIX_ROOT/var/nix/profiles/default/etc/profile.d/nix-daemon.sh"
@@ -450,9 +453,11 @@ create_directories() {
 }

 place_channel_configuration() {
+    if [ -z "${NIX_INSTALLER_NO_CHANNEL_ADD:-}" ]; then
         echo "https://nixos.org/channels/nixpkgs-unstable nixpkgs" > "$SCRATCH/.nix-channels"
         _sudo "to set up the default system channel (part 1)" \
             install -m 0664 "$SCRATCH/.nix-channels" "$ROOT_HOME/.nix-channels"
+    fi
 }

 welcome_to_nix() {
@@ -634,18 +639,20 @@ setup_default_profile() {
         export NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt
     fi

+    if [ -z "${NIX_INSTALLER_NO_CHANNEL_ADD:-}" ]; then
         # Have to explicitly pass NIX_SSL_CERT_FILE as part of the sudo call,
         # otherwise it will be lost in environments where sudo doesn't pass
         # all the environment variables by default.
         _sudo "to update the default channel in the default profile" \
             HOME="$ROOT_HOME" NIX_SSL_CERT_FILE="$NIX_SSL_CERT_FILE" "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs \
             || channel_update_failed=1
+    fi
 }


 place_nix_configuration() {
     cat <<EOF > "$SCRATCH/nix.conf"
+$NIX_EXTRA_CONF
 build-users-group = $NIX_BUILD_GROUP_NAME
 EOF
     _sudo "to place the default nix daemon configuration (part 2)" \
@@ -40,20 +40,29 @@ elif [ "$(uname -s)" = "Linux" ] && [ -e /run/systemd/system ]; then
 fi

 INSTALL_MODE=no-daemon
+CREATE_DARWIN_VOLUME=0
 # handle the command line flags
-while [ "x${1:-}" != "x" ]; do
-    if [ "x${1:-}" = "x--no-daemon" ]; then
-        INSTALL_MODE=no-daemon
-    elif [ "x${1:-}" = "x--daemon" ]; then
-        INSTALL_MODE=daemon
-    elif [ "x${1:-}" = "x--no-channel-add" ]; then
-        NIX_INSTALLER_NO_CHANNEL_ADD=1
-    elif [ "x${1:-}" = "x--no-modify-profile" ]; then
-        NIX_INSTALLER_NO_MODIFY_PROFILE=1
-    elif [ "x${1:-}" != "x" ]; then
+while [ $# -gt 0 ]; do
+    case $1 in
+        --daemon)
+            INSTALL_MODE=daemon;;
+        --no-daemon)
+            INSTALL_MODE=no-daemon;;
+        --no-channel-add)
+            export NIX_INSTALLER_NO_CHANNEL_ADD=1;;
+        --daemon-user-count)
+            export NIX_USER_COUNT=$2
+            shift;;
+        --no-modify-profile)
+            NIX_INSTALLER_NO_MODIFY_PROFILE=1;;
+        --darwin-use-unencrypted-nix-store-volume)
+            CREATE_DARWIN_VOLUME=1;;
+        --nix-extra-conf-file)
+            export NIX_EXTRA_CONF="$(cat $2)"
+            shift;;
+        *)
         (
-        echo "Nix Installer [--daemon|--no-daemon] [--no-channel-add] [--no-modify-profile]"
+        echo "Nix Installer [--daemon|--no-daemon] [--daemon-user-count INT] [--no-channel-add] [--no-modify-profile] [--darwin-use-unencrypted-nix-store-volume] [--nix-extra-conf-file FILE]"

         echo "Choose installation method."
         echo ""
@@ -72,12 +81,46 @@ while [ "x${1:-}" != "x" ]; do
         echo " --no-modify-profile: Skip channel installation. When not provided nixpkgs-unstable"
         echo "     is installed by default."
         echo ""
+        echo " --daemon-user-count: Number of build users to create. Defaults to 32."
+        echo ""
+        echo " --nix-extra-conf-file: Path to nix.conf to prepend when installing /etc/nix.conf"
+        echo ""
         ) >&2
-        exit
+
+        # darwin and Catalina+
+        if [ "$(uname -s)" = "Darwin" ] && [ "$macos_major" -gt 14 ]; then
+            (
+            echo " --darwin-use-unencrypted-nix-store-volume: Create an APFS volume for the Nix"
+            echo "     store and mount it at /nix. This is the recommended way to create"
+            echo "     /nix with a read-only / on macOS >=10.15."
+            echo "     See: https://nixos.org/nix/manual/#sect-macos-installation"
+            echo ""
+            ) >&2
+        fi
+        exit;;
+    esac
     shift
 done

+if [ "$(uname -s)" = "Darwin" ]; then
+    if [ "$CREATE_DARWIN_VOLUME" = 1 ]; then
+        printf '\e[1;31mCreating volume and mountpoint /nix.\e[0m\n'
+        "$self/create-darwin-volume.sh"
+    fi
+
+    info=$(diskutil info -plist / | xpath "/plist/dict/key[text()='Writable']/following-sibling::true[1]" 2> /dev/null)
+    if ! [ -e $dest ] && [ -n "$info" ] && [ "$macos_major" -gt 14 ]; then
+        (
+        echo ""
+        echo "Installing on macOS >=10.15 requires relocating the store to an apfs volume."
+        echo "Use sh <(curl https://nixos.org/nix/install) --darwin-use-unencrypted-nix-store-volume or run the preparation steps manually."
+        echo "See https://nixos.org/nix/manual/#sect-macos-installation"
+        echo ""
+        ) >&2
+        exit 1
+    fi
+fi
+
 if [ "$INSTALL_MODE" = "daemon" ]; then
     printf '\e[1;31mSwitching to the Daemon-based Installer\e[0m\n'
     exec "$self/install-multi-user"
@@ -170,6 +213,17 @@ if [ -z "$NIX_INSTALLER_NO_MODIFY_PROFILE" ]; then
             break
         fi
     done
+    for i in .zshenv .zshrc; do
+        fn="$HOME/$i"
+        if [ -w "$fn" ]; then
+            if ! grep -q "$p" "$fn"; then
+                echo "modifying $fn..." >&2
+                echo "if [ -e $p ]; then . $p; fi # added by Nix installer" >> "$fn"
+            fi
+            added=1
+            break
+        fi
+    done
 fi

 if [ -z "$added" ]; then
@@ -36,7 +36,9 @@ tarball="$tmpDir/$(basename "$tmpDir/nix-@nixVersion@-$system.tar.xz")"

 require_util curl "download the binary tarball"
 require_util tar "unpack the binary tarball"
+if [ "$(uname -s)" != "Darwin" ]; then
     require_util xz "unpack the binary tarball"
+fi

 echo "downloading Nix @nixVersion@ binary tarball for $system from '$url' to '$tmpDir'..."
 curl -L "$url" -o "$tarball" || oops "failed to download '$url'"
@@ -200,8 +200,7 @@ static int _main(int argc, char * * argv)

         } catch (std::exception & e) {
             auto msg = chomp(drainFD(5, false));
-            logError(
-                ErrorInfo {
+            logError({
                 .name = "Remote build",
                 .hint = hintfmt("cannot build on '%s': %s%s",
                     bestMachine->storeUri, e.what(),
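This hunk is the "errors-phase-2" side of the merge: `logError` now takes an `ErrorInfo` value directly, so call sites can drop the explicit `ErrorInfo {` and rely on brace initialisation. A sketch of the resulting call shape, assuming a signature along the lines of `void logError(ErrorInfo info)` (`storeUri` and `e` stand in for the real arguments):

// Sketch: with an ErrorInfo parameter, designated initialisers can be
// written directly inside the braces at the call site.
logError({
    .name = "Remote build",
    .hint = hintfmt("cannot build on '%s': %s", storeUri, e.what()),
});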
@@ -1,12 +0,0 @@
-programs += error-demo
-
-error-demo_DIR := $(d)
-
-error-demo_SOURCES := \
-  $(wildcard $(d)/*.cc) \
-
-error-demo_CXXFLAGS += -I src/libutil -I src/libexpr
-
-error-demo_LIBS = libutil libexpr
-
-error-demo_LDFLAGS = -pthread $(SODIUM_LIBS) $(EDITLINE_LIBS) $(BOOST_LDFLAGS) -lboost_context -lboost_thread -lboost_system
@@ -1698,7 +1698,7 @@ string EvalState::copyPathToStore(PathSet & context, const Path & path)
     else {
         auto p = settings.readOnlyMode
             ? store->computeStorePathForPath(std::string(baseNameOf(path)), checkSourcePath(path)).first
-            : store->addToStore(std::string(baseNameOf(path)), checkSourcePath(path), true, htSHA256, defaultPathFilter, repair);
+            : store->addToStore(std::string(baseNameOf(path)), checkSourcePath(path), FileIngestionMethod::Recursive, htSHA256, defaultPathFilter, repair);
         dstPath = store->printStorePath(p);
         srcToStore.insert_or_assign(path, std::move(p));
         printMsg(lvlChatty, "copied source '%1%' -> '%2%'", path, dstPath);
@@ -594,7 +594,7 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *

     std::optional<std::string> outputHash;
     std::string outputHashAlgo;
-    bool outputHashRecursive = false;
+    auto ingestionMethod = FileIngestionMethod::Flat;

     StringSet outputs;
     outputs.insert("out");
@@ -605,8 +605,8 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
         vomit("processing attribute '%1%'", key);

         auto handleHashMode = [&](const std::string & s) {
-            if (s == "recursive") outputHashRecursive = true;
-            else if (s == "flat") outputHashRecursive = false;
+            if (s == "recursive") ingestionMethod = FileIngestionMethod::Recursive;
+            else if (s == "flat") ingestionMethod = FileIngestionMethod::Flat;
             else
                 throw EvalError(
                     ErrorInfo {
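The lambda now maps the `outputHashMode` attribute straight onto the enum instead of a bool. A self-contained sketch of the same three-way mapping, using a standard exception in place of the `EvalError` machinery this hunk elides (`FileIngestionMethod` as sketched near the Perl bindings above):

#include <stdexcept>
#include <string>

// Sketch (assumption): "recursive" and "flat" are the only accepted modes.
FileIngestionMethod parseHashMode(const std::string & s)
{
    if (s == "recursive") return FileIngestionMethod::Recursive;
    if (s == "flat") return FileIngestionMethod::Flat;
    throw std::invalid_argument("invalid value '" + s + "' for 'outputHashMode' attribute");
}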
@@ -787,11 +787,14 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
             HashType ht = outputHashAlgo.empty() ? htUnknown : parseHashType(outputHashAlgo);
             Hash h(*outputHash, ht);

-            auto outPath = state.store->makeFixedOutputPath(outputHashRecursive, h, drvName);
+            auto outPath = state.store->makeFixedOutputPath(ingestionMethod, h, drvName);
             if (!jsonObject) drv.env["out"] = state.store->printStorePath(outPath);
-            drv.outputs.insert_or_assign("out", DerivationOutput(std::move(outPath),
-                (outputHashRecursive ? "r:" : "") + printHashType(h.type),
-                h.to_string(Base16, false)));
+            drv.outputs.insert_or_assign("out", DerivationOutput {
+                std::move(outPath),
+                (ingestionMethod == FileIngestionMethod::Recursive ? "r:" : "")
+                    + printHashType(h.type),
+                h.to_string(Base16, false),
+            });
         }

         else {
@@ -1139,7 +1142,7 @@ static void prim_toFile(EvalState & state, const Pos & pos, Value * * args, Valu


 static void addPath(EvalState & state, const Pos & pos, const string & name, const Path & path_,
-    Value * filterFun, bool recursive, const Hash & expectedHash, Value & v)
+    Value * filterFun, FileIngestionMethod method, const Hash & expectedHash, Value & v)
 {
     const auto path = evalSettings.pureEval && expectedHash ?
         path_ :
@@ -1170,12 +1173,12 @@ static void addPath(EvalState & state, const Pos & pos, const string & name, con

     std::optional<StorePath> expectedStorePath;
     if (expectedHash)
-        expectedStorePath = state.store->makeFixedOutputPath(recursive, expectedHash, name);
+        expectedStorePath = state.store->makeFixedOutputPath(method, expectedHash, name);
     Path dstPath;
     if (!expectedHash || !state.store->isValidPath(*expectedStorePath)) {
         dstPath = state.store->printStorePath(settings.readOnlyMode
-            ? state.store->computeStorePathForPath(name, path, recursive, htSHA256, filter).first
-            : state.store->addToStore(name, path, recursive, htSHA256, filter, state.repair));
+            ? state.store->computeStorePathForPath(name, path, method, htSHA256, filter).first
+            : state.store->addToStore(name, path, method, htSHA256, filter, state.repair));
         if (expectedHash && expectedStorePath != state.store->parseStorePath(dstPath))
             throw Error("store path mismatch in (possibly filtered) path added from '%s'", path);
     } else
@@ -1206,7 +1209,7 @@ static void prim_filterSource(EvalState & state, const Pos & pos, Value * * args
             .nixCode = NixCode { .errPos = pos }
         });

-    addPath(state, pos, std::string(baseNameOf(path)), path, args[0], true, Hash(), v);
+    addPath(state, pos, std::string(baseNameOf(path)), path, args[0], FileIngestionMethod::Recursive, Hash(), v);
 }

 static void prim_path(EvalState & state, const Pos & pos, Value * * args, Value & v)
@@ -1215,7 +1218,7 @@ static void prim_path(EvalState & state, const Pos & pos, Value * * args, Value
     Path path;
     string name;
     Value * filterFun = nullptr;
-    auto recursive = true;
+    auto method = FileIngestionMethod::Recursive;
     Hash expectedHash;

     for (auto & attr : *args[0]->attrs) {
@@ -1236,7 +1239,7 @@ static void prim_path(EvalState & state, const Pos & pos, Value * * args, Value
             state.forceValue(*attr.value, pos);
             filterFun = attr.value;
         } else if (n == "recursive")
-            recursive = state.forceBool(*attr.value, *attr.pos);
+            method = FileIngestionMethod { state.forceBool(*attr.value, *attr.pos) };
         else if (n == "sha256")
             expectedHash = Hash(state.forceStringNoCtx(*attr.value, *attr.pos), htSHA256);
         else
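Because the enum is backed by `bool` (see the sketch near the Perl bindings above), the boolean `recursive` attribute of `builtins.path` converts with plain list-initialisation rather than a conditional:

// Sketch: list-initialisation converts the bool directly. This is only
// safe while the enum is defined with Flat = false and Recursive = true.
bool recursive = true;                             // stand-in for state.forceBool(...)
auto method = FileIngestionMethod { recursive };   // == FileIngestionMethod::Recursive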
@@ -1256,7 +1259,7 @@ static void prim_path(EvalState & state, const Pos & pos, Value * * args, Value
     if (name.empty())
         name = baseNameOf(path);

-    addPath(state, pos, name, path, filterFun, recursive, expectedHash, v);
+    addPath(state, pos, name, path, filterFun, method, expectedHash, v);
 }
@@ -195,7 +195,7 @@ struct GitInput : Input
             return files.count(file);
         };

-        auto storePath = store->addToStore("source", actualUrl, true, htSHA256, filter);
+        auto storePath = store->addToStore("source", actualUrl, FileIngestionMethod::Recursive, htSHA256, filter);

         auto tree = Tree {
             .actualPath = store->printStorePath(storePath),
@@ -282,7 +282,10 @@ struct GitInput : Input
             // FIXME: git stderr messes up our progress indicator, so
             // we're using --quiet for now. Should process its stderr.
             try {
-                runProgram("git", true, { "-C", repoDir, "fetch", "--quiet", "--force", "--", actualUrl, fmt("%s:%s", *input->ref, *input->ref) });
+                auto fetchRef = input->ref->compare(0, 5, "refs/") == 0
+                    ? *input->ref
+                    : "refs/heads/" + *input->ref;
+                runProgram("git", true, { "-C", repoDir, "fetch", "--quiet", "--force", "--", actualUrl, fmt("%s:%s", fetchRef, fetchRef) });
             } catch (Error & e) {
                 if (!pathExists(localRefFile)) throw;
                 warn("could not update local clone of Git repository '%s'; continuing with the most recent version", actualUrl);
@@ -347,7 +350,7 @@ struct GitInput : Input
                 unpackTarfile(*source, tmpDir);
             }

-            auto storePath = store->addToStore(name, tmpDir, true, htSHA256, filter);
+            auto storePath = store->addToStore(name, tmpDir, FileIngestionMethod::Recursive, htSHA256, filter);

             auto lastModified = std::stoull(runProgram("git", true, { "-C", repoDir, "log", "-1", "--format=%ct", input->rev->gitRev() }));
@@ -418,7 +421,7 @@ struct GitInputScheme : InputScheme

         auto input = std::make_unique<GitInput>(parseURL(getStrAttr(attrs, "url")));
         if (auto ref = maybeGetStrAttr(attrs, "ref")) {
-            if (!std::regex_match(*ref, refRegex))
+            if (std::regex_search(*ref, badGitRefRegex))
                 throw BadURL("invalid Git branch/tag name '%s'", *ref);
             input->ref = *ref;
         }
@@ -114,7 +114,7 @@ struct MercurialInput : Input
             return files.count(file);
         };

-        auto storePath = store->addToStore("source", actualUrl, true, htSHA256, filter);
+        auto storePath = store->addToStore("source", actualUrl, FileIngestionMethod::Recursive, htSHA256, filter);

         return {Tree {
             .actualPath = store->printStorePath(storePath),
@@ -67,11 +67,12 @@ DownloadFileResult downloadFile(
         StringSink sink;
         dumpString(*res.data, sink);
         auto hash = hashString(htSHA256, *res.data);
-        ValidPathInfo info(store->makeFixedOutputPath(false, hash, name));
+        ValidPathInfo info(store->makeFixedOutputPath(FileIngestionMethod::Flat, hash, name));
         info.narHash = hashString(htSHA256, *sink.s);
         info.narSize = sink.s->size();
-        info.ca = makeFixedOutputCA(false, hash);
-        store->addToStore(info, sink.s, NoRepair, NoCheckSigs);
+        info.ca = makeFixedOutputCA(FileIngestionMethod::Flat, hash);
+        auto source = StringSource { *sink.s };
+        store->addToStore(info, source, NoRepair, NoCheckSigs);
         storePath = std::move(info.path);
     }
@@ -141,7 +142,7 @@ Tree downloadTarball(
             throw nix::Error("tarball '%s' contains an unexpected number of top-level files", url);
         auto topDir = tmpDir + "/" + members.begin()->name;
         lastModified = lstat(topDir).st_mtime;
-        unpackedStorePath = store->addToStore(name, topDir, true, htSHA256, defaultPathFilter, NoRepair);
+        unpackedStorePath = store->addToStore(name, topDir, FileIngestionMethod::Recursive, htSHA256, defaultPathFilter, NoRepair);
     }

     Attrs infoAttrs({
@@ -8,7 +8,7 @@ namespace nix::fetchers {
 StorePath TreeInfo::computeStorePath(Store & store) const
 {
     assert(narHash);
-    return store.makeFixedOutputPath(true, narHash, "source");
+    return store.makeFixedOutputPath(FileIngestionMethod::Recursive, narHash, "source");
 }

 }
@@ -113,9 +113,12 @@ void BinaryCacheStore::writeNarInfo(ref<NarInfo> narInfo)
         diskCache->upsertNarInfo(getUri(), hashPart, std::shared_ptr<NarInfo>(narInfo));
 }

-void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
+void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource,
     RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor)
 {
+    // FIXME: See if we can use the original source to reduce memory usage.
+    auto nar = make_ref<std::string>(narSource.drain());
+
     if (!repair && isValidPath(info.path)) return;

     /* Verify that all references are valid. This may do some .narinfo
@@ -327,7 +330,7 @@ void BinaryCacheStore::queryPathInfoUncached(const StorePath & storePath,
 }

 StorePath BinaryCacheStore::addToStore(const string & name, const Path & srcPath,
-    bool recursive, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
+    FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
 {
     // FIXME: some cut&paste from LocalStore::addToStore().
@@ -336,7 +339,7 @@ StorePath BinaryCacheStore::addToStore(const string & name, const Path & srcPath
        small files. */
     StringSink sink;
     Hash h;
-    if (recursive) {
+    if (method == FileIngestionMethod::Recursive) {
         dumpPath(srcPath, sink, filter);
         h = hashString(hashAlgo, *sink.s);
     } else {
@@ -345,9 +348,10 @@ StorePath BinaryCacheStore::addToStore(const string & name, const Path & srcPath
         h = hashString(hashAlgo, s);
     }

-    ValidPathInfo info(makeFixedOutputPath(recursive, h, name));
+    ValidPathInfo info(makeFixedOutputPath(method, h, name));

-    addToStore(info, sink.s, repair, CheckSigs, nullptr);
+    auto source = StringSource { *sink.s };
+    addToStore(info, source, repair, CheckSigs, nullptr);

     return std::move(info.path);
 }
@@ -361,7 +365,8 @@ StorePath BinaryCacheStore::addTextToStore(const string & name, const string & s
     if (repair || !isValidPath(info.path)) {
         StringSink sink;
         dumpString(s, sink);
-        addToStore(info, sink.s, repair, CheckSigs, nullptr);
+        auto source = StringSource { *sink.s };
+        addToStore(info, source, repair, CheckSigs, nullptr);
     }

     return std::move(info.path);
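The call sites above adapt an in-memory NAR to the new `Source &` parameter of `addToStore`. A sketch of the adapter pattern, assuming `StringSource` is a `Source` implementation that reads from a borrowed `std::string`:

// Sketch: StringSource only borrows the buffer, so the sink's string must
// outlive the Source for the duration of the addToStore call.
StringSink sink;
dumpString(s, sink);                       // serialise `s` as a NAR into sink.s
auto source = StringSource { *sink.s };    // present the buffer as a Source
store.addToStore(info, source, repair, CheckSigs, nullptr);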
@@ -74,12 +74,12 @@ public:
     std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
     { unsupported("queryPathFromHashPart"); }

-    void addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
+    void addToStore(const ValidPathInfo & info, Source & narSource,
         RepairFlag repair, CheckSigsFlag checkSigs,
         std::shared_ptr<FSAccessor> accessor) override;

     StorePath addToStore(const string & name, const Path & srcPath,
-        bool recursive, HashType hashAlgo,
+        FileIngestionMethod method, HashType hashAlgo,
         PathFilter & filter, RepairFlag repair) override;

     StorePath addTextToStore(const string & name, const string & s,
@@ -513,9 +513,10 @@ private:
     Path fnUserLock;
     AutoCloseFD fdUserLock;

+    bool isEnabled = false;
     string user;
-    uid_t uid;
-    gid_t gid;
+    uid_t uid = 0;
+    gid_t gid = 0;
     std::vector<gid_t> supplementaryGIDs;

 public:
@@ -528,7 +529,9 @@ public:
     uid_t getGID() { assert(gid); return gid; }
     std::vector<gid_t> getSupplementaryGIDs() { return supplementaryGIDs; }

-    bool enabled() { return uid != 0; }
+    bool findFreeUser();
+
+    bool enabled() { return isEnabled; }

 };
@@ -536,6 +539,11 @@ public:
 UserLock::UserLock()
 {
     assert(settings.buildUsersGroup != "");
+    createDirs(settings.nixStateDir + "/userpool");
+}
+
+bool UserLock::findFreeUser() {
+    if (enabled()) return true;

     /* Get the members of the build-users-group. */
     struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
@@ -565,7 +573,6 @@ UserLock::UserLock()
             throw Error("the user '%1%' in the group '%2%' does not exist",
                 i, settings.buildUsersGroup);

-        createDirs(settings.nixStateDir + "/userpool");

         fnUserLock = (format("%1%/userpool/%2%") % settings.nixStateDir % pw->pw_uid).str();
@@ -596,16 +603,14 @@ UserLock::UserLock()
             supplementaryGIDs.resize(ngroups);
 #endif

-            return;
+            isEnabled = true;
+            return true;
         }
     }

-    throw Error("all build users are currently in use; "
-        "consider creating additional users and adding them to the '%1%' group",
-        settings.buildUsersGroup);
+    return false;
 }


 void UserLock::kill()
 {
     killUser(uid);
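With `findFreeUser()` split out of the constructor, exhausting the build-user pool is no longer fatal: instead of throwing, the lock reports failure and the caller can wait and retry. A sketch of the new calling convention (mirroring the real call site in `tryLocalBuild` further down):

// Sketch (assumption): poll for a free build user instead of catching a throw.
if (!buildUser) buildUser = std::make_unique<UserLock>();
if (buildUser->findFreeUser()) {
    buildUser->kill();                          // nothing may still run under this uid
} else {
    debug("waiting for build users");
    worker.waitForAWhile(shared_from_this());   // re-schedule this goal and retry
}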
@@ -934,6 +939,7 @@ private:
     void closureRepaired();
     void inputsRealised();
     void tryToBuild();
+    void tryLocalBuild();
     void buildDone();

     /* Is the build hook willing to perform the build? */
@@ -1005,6 +1011,8 @@ private:
         Goal::amDone(result);
     }

+    void started();
+
     void done(BuildResult::Status status, const string & msg = "");

     StorePathSet exportReferences(const StorePathSet & storePaths);
@@ -1150,8 +1158,7 @@ void DerivationGoal::loadDerivation()
     trace("loading derivation");

     if (nrFailed != 0) {
-        logError(
-            ErrorInfo {
+        logError({
             .name = "missing derivation during build",
             .hint = hintfmt("cannot build missing derivation '%s'", worker.store.printStorePath(drvPath))
         });
@@ -1305,8 +1312,7 @@ void DerivationGoal::repairClosure()
     /* Check each path (slow!). */
     for (auto & i : outputClosure) {
         if (worker.pathContentsGood(i)) continue;
-        logError(
-            ErrorInfo {
+        logError({
             .name = "Corrupt path in closure",
             .hint = hintfmt(
                 "found corrupted or missing path '%s' in the output closure of '%s'",
@@ -1345,8 +1351,7 @@ void DerivationGoal::inputsRealised()
     if (nrFailed != 0) {
         if (!useDerivation)
             throw Error("some dependencies of '%s' are missing", worker.store.printStorePath(drvPath));
-        logError(
-            ErrorInfo {
+        logError({
             .name = "Dependencies could not be built",
             .hint = hintfmt(
                 "cannot build derivation '%s': %s dependencies couldn't be built",
@@ -1406,6 +1411,19 @@ void DerivationGoal::inputsRealised()
     result = BuildResult();
 }

+void DerivationGoal::started() {
+    auto msg = fmt(
+        buildMode == bmRepair ? "repairing outputs of '%s'" :
+        buildMode == bmCheck ? "checking outputs of '%s'" :
+        nrRounds > 1 ? "building '%s' (round %d/%d)" :
+        "building '%s'", worker.store.printStorePath(drvPath), curRound, nrRounds);
+    fmt("building '%s'", worker.store.printStorePath(drvPath));
+    if (hook) msg += fmt(" on '%s'", machineName);
+    act = std::make_unique<Activity>(*logger, lvlInfo, actBuild, msg,
+        Logger::Fields{worker.store.printStorePath(drvPath), hook ? machineName : "", curRound, nrRounds});
+    mcRunningBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.runningBuilds);
+    worker.updateProgress();
+}
+
 void DerivationGoal::tryToBuild()
 {
@@ -1457,20 +1475,6 @@ void DerivationGoal::tryToBuild()
        supported for local builds. */
     bool buildLocally = buildMode != bmNormal || parsedDrv->willBuildLocally();

-    auto started = [&]() {
-        auto msg = fmt(
-            buildMode == bmRepair ? "repairing outputs of '%s'" :
-            buildMode == bmCheck ? "checking outputs of '%s'" :
-            nrRounds > 1 ? "building '%s' (round %d/%d)" :
-            "building '%s'", worker.store.printStorePath(drvPath), curRound, nrRounds);
-        fmt("building '%s'", worker.store.printStorePath(drvPath));
-        if (hook) msg += fmt(" on '%s'", machineName);
-        act = std::make_unique<Activity>(*logger, lvlInfo, actBuild, msg,
-            Logger::Fields{worker.store.printStorePath(drvPath), hook ? machineName : "", curRound, nrRounds});
-        mcRunningBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.runningBuilds);
-        worker.updateProgress();
-    };
-
     /* Is the build hook willing to accept this job? */
     if (!buildLocally) {
         switch (tryBuildHook()) {
@@ -1503,6 +1507,34 @@ void DerivationGoal::tryToBuild()
         return;
     }

+    state = &DerivationGoal::tryLocalBuild;
+    worker.wakeUp(shared_from_this());
+}
+
+void DerivationGoal::tryLocalBuild() {
+
+    /* If `build-users-group' is not empty, then we have to build as
+       one of the members of that group. */
+    if (settings.buildUsersGroup != "" && getuid() == 0) {
+#if defined(__linux__) || defined(__APPLE__)
+        if (!buildUser) buildUser = std::make_unique<UserLock>();
+
+        if (buildUser->findFreeUser()) {
+            /* Make sure that no other processes are executing under this
+               uid. */
+            buildUser->kill();
+        } else {
+            debug("waiting for build users");
+            worker.waitForAWhile(shared_from_this());
+            return;
+        }
+#else
+        /* Don't know how to block the creation of setuid/setgid
+           binaries on this platform. */
+        throw Error("build users are not supported on this platform for security reasons");
+#endif
+    }
+
     try {

         /* Okay, we have to build. */
||||
|
@ -1809,8 +1841,7 @@ HookReply DerivationGoal::tryBuildHook()
|
|||
|
||||
} catch (SysError & e) {
|
||||
if (e.errNo == EPIPE) {
|
||||
logError(
|
||||
ErrorInfo {
|
||||
logError({
|
||||
.name = "Build hook died",
|
||||
.hint = hintfmt(
|
||||
"build hook died unexpectedly: %s",
|
||||
|
@ -1968,22 +1999,6 @@ void DerivationGoal::startBuilder()
|
|||
#endif
|
||||
}
|
||||
|
||||
/* If `build-users-group' is not empty, then we have to build as
|
||||
one of the members of that group. */
|
||||
if (settings.buildUsersGroup != "" && getuid() == 0) {
|
||||
#if defined(__linux__) || defined(__APPLE__)
|
||||
buildUser = std::make_unique<UserLock>();
|
||||
|
||||
/* Make sure that no other processes are executing under this
|
||||
uid. */
|
||||
buildUser->kill();
|
||||
#else
|
||||
/* Don't know how to block the creation of setuid/setgid
|
||||
binaries on this platform. */
|
||||
throw Error("build users are not supported on this platform for security reasons");
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Create a temporary directory where the build will take
|
||||
place. */
|
||||
tmpDir = createTempDir("", "nix-build-" + std::string(drvPath.name()), false, false, 0700);
|
||||
|
@ -2740,7 +2755,7 @@ struct RestrictedStore : public LocalFSStore
|
|||
{ throw Error("queryPathFromHashPart"); }
|
||||
|
||||
StorePath addToStore(const string & name, const Path & srcPath,
|
||||
bool recursive = true, HashType hashAlgo = htSHA256,
|
||||
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256,
|
||||
PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair) override
|
||||
{ throw Error("addToStore"); }
|
||||
|
||||
|
@ -2753,9 +2768,9 @@ struct RestrictedStore : public LocalFSStore
|
|||
}
|
||||
|
||||
StorePath addToStoreFromDump(const string & dump, const string & name,
|
||||
bool recursive = true, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair) override
|
||||
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair) override
|
||||
{
|
||||
auto path = next->addToStoreFromDump(dump, name, recursive, hashAlgo, repair);
|
||||
auto path = next->addToStoreFromDump(dump, name, method, hashAlgo, repair);
|
||||
goal.addDependency(path);
|
||||
return path;
|
||||
}
|
||||
|
@ -3673,8 +3688,7 @@ void DerivationGoal::registerOutputs()
|
|||
/* Apply hash rewriting if necessary. */
|
||||
bool rewritten = false;
|
||||
if (!outputRewrites.empty()) {
|
||||
logWarning(
|
||||
ErrorInfo {
|
||||
logWarning({
|
||||
.name = "Rewriting hashes",
|
||||
.hint = hintfmt("rewriting hashes in '%1%'; cross fingers", path)
|
||||
});
|
||||
|
@ -3702,10 +3716,10 @@ void DerivationGoal::registerOutputs()
|
|||
|
||||
if (fixedOutput) {
|
||||
|
||||
bool recursive; Hash h;
|
||||
i.second.parseHashInfo(recursive, h);
|
||||
FileIngestionMethod outputHashMode; Hash h;
|
||||
i.second.parseHashInfo(outputHashMode, h);
|
||||
|
||||
if (!recursive) {
|
||||
if (outputHashMode == FileIngestionMethod::Flat) {
|
||||
/* The output path should be a regular file without execute permission. */
|
||||
if (!S_ISREG(st.st_mode) || (st.st_mode & S_IXUSR) != 0)
|
||||
throw BuildError(
|
||||
|
@ -3716,9 +3730,11 @@ void DerivationGoal::registerOutputs()
|
|||
|
||||
/* Check the hash. In hash mode, move the path produced by
|
||||
the derivation to its content-addressed location. */
|
||||
Hash h2 = recursive ? hashPath(h.type, actualPath).first : hashFile(h.type, actualPath);
|
||||
Hash h2 = outputHashMode == FileIngestionMethod::Recursive
|
||||
? hashPath(h.type, actualPath).first
|
||||
: hashFile(h.type, actualPath);
|
||||
|
||||
auto dest = worker.store.makeFixedOutputPath(recursive, h2, i.second.path.name());
|
||||
auto dest = worker.store.makeFixedOutputPath(outputHashMode, h2, i.second.path.name());
|
||||
|
||||
if (h != h2) {
|
||||
|
||||
|
@ -3747,7 +3763,7 @@ void DerivationGoal::registerOutputs()
|
|||
else
|
||||
assert(worker.store.parseStorePath(path) == dest);
|
||||
|
||||
ca = makeFixedOutputCA(recursive, h2);
|
||||
ca = makeFixedOutputCA(outputHashMode, h2);
|
||||
}
|
||||
|
||||
/* Get rid of all weird permissions. This also checks that
|
||||
|
@ -3854,8 +3870,7 @@ void DerivationGoal::registerOutputs()
|
|||
if (settings.enforceDeterminism)
|
||||
throw NotDeterministic(hint);
|
||||
|
||||
logError(
|
||||
ErrorInfo {
|
||||
logError({
|
||||
.name = "Output determinism error",
|
||||
.hint = hint
|
||||
});
|
||||
|
@ -3974,7 +3989,9 @@ void DerivationGoal::checkOutputs(const std::map<Path, ValidPathInfo> & outputs)
|
|||
|
||||
auto spec = parseReferenceSpecifiers(worker.store, *drv, *value);
|
||||
|
||||
auto used = recursive ? cloneStorePathSet(getClosure(info.path).first) : cloneStorePathSet(info.references);
|
||||
auto used = recursive
|
||||
? cloneStorePathSet(getClosure(info.path).first)
|
||||
: cloneStorePathSet(info.references);
|
||||
|
||||
if (recursive && checks.ignoreSelfRefs)
|
||||
used.erase(info.path);
|
||||
|
@ -4122,8 +4139,7 @@ void DerivationGoal::handleChildOutput(int fd, const string & data)
|
|||
{
|
||||
logSize += data.size();
|
||||
if (settings.maxLogSize && logSize > settings.maxLogSize) {
|
||||
logError(
|
||||
ErrorInfo {
|
||||
logError({
|
||||
.name = "Max log size exceeded",
|
||||
.hint = hintfmt(
|
||||
"%1% killed after writing more than %2% bytes of log output",
|
||||
|
@ -4444,8 +4460,7 @@ void SubstitutionGoal::tryNext()
|
|||
&& !sub->isTrusted
|
||||
&& !info->checkSignatures(worker.store, worker.store.getPublicKeys()))
|
||||
{
|
||||
logWarning(
|
||||
ErrorInfo {
|
||||
logWarning({
|
||||
.name = "Invalid path signature",
|
||||
.hint = hintfmt("substituter '%s' does not have a valid signature for path '%s'",
|
||||
sub->getUri(), worker.store.printStorePath(storePath))
|
||||
|
@ -4861,7 +4876,7 @@ void Worker::waitForInput()
|
|||
if (!waitingForAWhile.empty()) {
|
||||
useTimeout = true;
|
||||
if (lastWokenUp == steady_time_point::min())
|
||||
printInfo("waiting for locks or build slots...");
|
||||
printInfo("waiting for locks, build slots or build users...");
|
||||
if (lastWokenUp == steady_time_point::min() || lastWokenUp > before) lastWokenUp = before;
|
||||
timeout = std::max(1L,
|
||||
(long) std::chrono::duration_cast<std::chrono::seconds>(
|
||||
|
@ -4931,8 +4946,7 @@ void Worker::waitForInput()
|
|||
j->respectTimeouts &&
|
||||
after - j->lastOutput >= std::chrono::seconds(settings.maxSilentTime))
|
||||
{
|
||||
logError(
|
||||
ErrorInfo {
|
||||
logError({
|
||||
.name = "Silent build timeout",
|
||||
.hint = hintfmt(
|
||||
"%1% timed out after %2% seconds of silence",
|
||||
|
@ -4946,8 +4960,7 @@ void Worker::waitForInput()
|
|||
j->respectTimeouts &&
|
||||
after - j->timeStarted >= std::chrono::seconds(settings.buildTimeout))
|
||||
{
|
||||
logError(
|
||||
ErrorInfo {
|
||||
logError({
|
||||
.name = "Build timeout",
|
||||
.hint = hintfmt(
|
||||
"%1% timed out after %2% seconds",
|
||||
|
@ -5012,8 +5025,7 @@ bool Worker::pathContentsGood(const StorePath & path)
|
|||
}
|
||||
pathContentsGoodCache.insert_or_assign(path.clone(), res);
|
||||
if (!res)
|
||||
logError(
|
||||
ErrorInfo {
|
||||
logError({
|
||||
.name = "Corrupted path",
|
||||
.hint = hintfmt("path '%s' is corrupted or missing!", store.printStorePath(path))
|
||||
});
|
||||
|
|
|
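A note on the recurring `logError(ErrorInfo { ... })` → `logError({ ... })` rewrite above: it works because `logError` is a (GNU-style) variadic macro whose expansion supplies the `ErrorInfo` type itself, so the call site can pass a bare braced initializer. A minimal standalone sketch (hypothetical stand-ins, not the real nix headers):

    #include <iostream>
    #include <string>

    struct ErrorInfo {
        std::string name;
        std::string hint;
    };

    void logEI(const ErrorInfo & ei) {
        std::cerr << ei.name << ": " << ei.hint << "\n";
    }

    // Named variadic parameter (GNU extension, as in nix) so commas inside
    // the braced initializer don't split macro arguments.
    #define logError(errorInfo...) logEI(ErrorInfo errorInfo)

    int main() {
        logError({ .name = "Corrupt path in closure",
                   .hint = "found corrupted or missing path" });
    }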
@@ -367,20 +367,24 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
     }

     case wopAddToStore: {
-        bool fixed, recursive;
         std::string s, baseName;
+        FileIngestionMethod method;
+        {
+            bool fixed, recursive;
             from >> baseName >> fixed /* obsolete */ >> recursive >> s;
+            method = FileIngestionMethod { recursive };
             /* Compatibility hack. */
             if (!fixed) {
                 s = "sha256";
-                recursive = true;
+                method = FileIngestionMethod::Recursive;
             }
+        }
         HashType hashAlgo = parseHashType(s);

         TeeSource savedNAR(from);
         RetrieveRegularNARSink savedRegular;

-        if (recursive) {
+        if (method == FileIngestionMethod::Recursive) {
             /* Get the entire NAR dump from the client and save it to
                a string so that we can pass it to
                addToStoreFromDump(). */

@@ -392,7 +396,11 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         logger->startWork();
         if (!savedRegular.regular) throw Error("regular file expected");

-        auto path = store->addToStoreFromDump(recursive ? *savedNAR.data : savedRegular.s, baseName, recursive, hashAlgo);
+        auto path = store->addToStoreFromDump(
+            method == FileIngestionMethod::Recursive ? *savedNAR.data : savedRegular.s,
+            baseName,
+            method,
+            hashAlgo);
         logger->stopWork();

         to << store->printStorePath(path);
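The compatibility hack in the `wopAddToStore` hunk is easy to misread, so here is a hedged, standalone sketch of just that decode step (stand-in types; the real code reads `fixed` and `recursive` off the wire):

    #include <cassert>
    #include <cstdint>
    #include <string>

    enum struct FileIngestionMethod : uint8_t { Flat = false, Recursive = true };

    // Mirrors the hunk above: the legacy `recursive` bool becomes the enum,
    // and a non-fixed request is forced to (sha256, Recursive).
    FileIngestionMethod decodeMethod(bool fixed, bool recursive, std::string & hashAlgo) {
        auto method = FileIngestionMethod { recursive };
        if (!fixed) {
            hashAlgo = "sha256";
            method = FileIngestionMethod::Recursive;
        }
        return method;
    }

    int main() {
        std::string algo = "md5";
        assert(decodeMethod(false, false, algo) == FileIngestionMethod::Recursive);
        assert(algo == "sha256");
        assert(decodeMethod(true, false, algo) == FileIngestionMethod::Flat);
    }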
@@ -9,13 +9,13 @@
 namespace nix {


-void DerivationOutput::parseHashInfo(bool & recursive, Hash & hash) const
+void DerivationOutput::parseHashInfo(FileIngestionMethod & recursive, Hash & hash) const
 {
-    recursive = false;
+    recursive = FileIngestionMethod::Flat;
     string algo = hashAlgo;

     if (string(algo, 0, 2) == "r:") {
-        recursive = true;
+        recursive = FileIngestionMethod::Recursive;
         algo = string(algo, 2);
     }

@@ -22,7 +22,7 @@ struct DerivationOutput
     , hashAlgo(std::move(hashAlgo))
     , hash(std::move(hash))
     { }
-    void parseHashInfo(bool & recursive, Hash & hash) const;
+    void parseHashInfo(FileIngestionMethod & recursive, Hash & hash) const;
 };

 typedef std::map<string, DerivationOutput> DerivationOutputs;
@@ -1,3 +1,4 @@
+#include "serialise.hh"
 #include "store-api.hh"
 #include "archive.hh"
 #include "worker-protocol.hh"

@@ -100,7 +101,9 @@ StorePaths Store::importPaths(Source & source, std::shared_ptr<FSAccessor> accessor,
         if (readInt(source) == 1)
             readString(source);

-        addToStore(info, tee.source.data, NoRepair, checkSigs, accessor);
+        // Can't use underlying source, which would have been exhausted
+        auto source = StringSource { *tee.source.data };
+        addToStore(info, source, NoRepair, checkSigs, accessor);

         res.push_back(info.path.clone());
     }
@@ -599,8 +599,7 @@ struct curlFileTransfer : public FileTransfer
                 workerThreadMain();
             } catch (nix::Interrupted & e) {
             } catch (std::exception & e) {
-                logError(
-                    ErrorInfo {
+                logError({
                     .name = "File transfer",
                     .hint = hintfmt("unexpected error in download thread: %s",
                         e.what())
@@ -195,7 +195,7 @@ struct LegacySSHStore : public Store
     { unsupported("queryPathFromHashPart"); }

     StorePath addToStore(const string & name, const Path & srcPath,
-        bool recursive, HashType hashAlgo,
+        FileIngestionMethod method, HashType hashAlgo,
         PathFilter & filter, RepairFlag repair) override
     { unsupported("addToStore"); }
@@ -87,8 +87,7 @@ LocalStore::LocalStore(const Params & params)

     struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
     if (!gr)
-        logError(
-            ErrorInfo {
+        logError({
             .name = "'build-users-group' not found",
             .hint = hintfmt(
                 "warning: the group '%1%' specified in 'build-users-group' does not exist",

@@ -562,10 +561,10 @@ void LocalStore::checkDerivationOutputs(const StorePath & drvPath, const Derivation & drv)
     if (out == drv.outputs.end())
         throw Error("derivation '%s' does not have an output named 'out'", printStorePath(drvPath));

-    bool recursive; Hash h;
-    out->second.parseHashInfo(recursive, h);
+    FileIngestionMethod method; Hash h;
+    out->second.parseHashInfo(method, h);

-    check(makeFixedOutputPath(recursive, h, drvName), out->second.path, "out");
+    check(makeFixedOutputPath(method, h, drvName), out->second.path, "out");
 }

 else {

@@ -1048,11 +1047,11 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,


 StorePath LocalStore::addToStoreFromDump(const string & dump, const string & name,
-    bool recursive, HashType hashAlgo, RepairFlag repair)
+    FileIngestionMethod method, HashType hashAlgo, RepairFlag repair)
 {
     Hash h = hashString(hashAlgo, dump);

-    auto dstPath = makeFixedOutputPath(recursive, h, name);
+    auto dstPath = makeFixedOutputPath(method, h, name);

     addTempRoot(dstPath);

@@ -1072,7 +1071,7 @@ StorePath LocalStore::addToStoreFromDump(const string & dump, const string & name,

     autoGC();

-    if (recursive) {
+    if (method == FileIngestionMethod::Recursive) {
         StringSource source(dump);
         restorePath(realPath, source);
     } else

@@ -1085,7 +1084,7 @@ StorePath LocalStore::addToStoreFromDump(const string & dump, const string & name,
        above (if called with recursive == true and hashAlgo ==
        sha256); otherwise, compute it here. */
     HashResult hash;
-    if (recursive) {
+    if (method == FileIngestionMethod::Recursive) {
         hash.first = hashAlgo == htSHA256 ? h : hashString(htSHA256, dump);
         hash.second = dump.size();
     } else

@@ -1096,7 +1095,7 @@ StorePath LocalStore::addToStoreFromDump(const string & dump, const string & name,
     ValidPathInfo info(dstPath.clone());
     info.narHash = hash.first;
     info.narSize = hash.second;
-    info.ca = makeFixedOutputCA(recursive, h);
+    info.ca = makeFixedOutputCA(method, h);
     registerValidPath(info);
 }

@@ -1108,7 +1107,7 @@ StorePath LocalStore::addToStoreFromDump(const string & dump, const string & name,


 StorePath LocalStore::addToStore(const string & name, const Path & _srcPath,
-    bool recursive, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
+    FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
 {
     Path srcPath(absPath(_srcPath));

@@ -1116,12 +1115,12 @@ StorePath LocalStore::addToStore(const string & name, const Path & _srcPath,
        method for very large paths, but `copyPath' is mainly used for
        small files. */
     StringSink sink;
-    if (recursive)
+    if (method == FileIngestionMethod::Recursive)
         dumpPath(srcPath, sink, filter);
     else
         sink.s = make_ref<std::string>(readFile(srcPath));

-    return addToStoreFromDump(*sink.s, name, recursive, hashAlgo, repair);
+    return addToStoreFromDump(*sink.s, name, method, hashAlgo, repair);
 }

@@ -1242,8 +1241,7 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair)
     Path linkPath = linksDir + "/" + link.name;
     string hash = hashPath(htSHA256, linkPath).first.to_string(Base32, false);
     if (hash != link.name) {
-        logError(
-            ErrorInfo {
+        logError({
             .name = "Invalid hash",
             .hint = hintfmt(
                 "link '%s' was modified! expected hash '%s', got '%s'",

@@ -1281,8 +1279,7 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair)
     auto current = hashSink->finish();

     if (info->narHash != nullHash && info->narHash != current.first) {
-        logError(
-            ErrorInfo {
+        logError({
             .name = "Invalid hash - path modified",
             .hint = hintfmt("path '%s' was modified! expected hash '%s', got '%s'",
                 printStorePath(i), info->narHash.to_string(), current.first.to_string())

@@ -1337,8 +1334,7 @@ void LocalStore::verifyPath(const Path & pathS, const StringSet & store,
     if (!done.insert(pathS).second) return;

     if (!isStorePath(pathS)) {
-        logError(
-            ErrorInfo {
+        logError({
             .name = "Nix path not found",
             .hint = hintfmt("path '%s' is not in the Nix store", pathS)
         });

@@ -1364,8 +1360,7 @@ void LocalStore::verifyPath(const Path & pathS, const StringSet & store,
         auto state(_state.lock());
         invalidatePath(*state, path);
     } else {
-        logError(
-            ErrorInfo {
+        logError({
             .name = "Missing path with referrers",
             .hint = hintfmt("path '%s' disappeared, but it still has valid referrers!", pathS)
         });
@@ -149,7 +149,7 @@ public:
         std::shared_ptr<FSAccessor> accessor) override;

     StorePath addToStore(const string & name, const Path & srcPath,
-        bool recursive, HashType hashAlgo,
+        FileIngestionMethod method, HashType hashAlgo,
         PathFilter & filter, RepairFlag repair) override;

     /* Like addToStore(), but the contents of the path are contained

@@ -157,7 +157,7 @@ public:
        true) or simply the contents of a regular file (if recursive ==
        false). */
     StorePath addToStoreFromDump(const string & dump, const string & name,
-        bool recursive = true, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair) override;
+        FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair) override;

     StorePath addTextToStore(const string & name, const string & s,
         const StorePathSet & references, RepairFlag repair) override;
@@ -130,8 +130,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
        NixOS (example: $fontconfig/var/cache being modified). Skip
        those files. FIXME: check the modification time. */
     if (S_ISREG(st.st_mode) && (st.st_mode & S_IWUSR)) {
-        logWarning(
-            ErrorInfo {
+        logWarning({
             .name = "Suspicious file",
             .hint = hintfmt("skipping suspicious writable file '%1%'", path)
         });

@@ -198,8 +197,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
     }

     if (st.st_size != stLink.st_size) {
-        logWarning(
-            ErrorInfo {
+        logWarning({
             .name = "Corrupted link",
             .hint = hintfmt("removing corrupted link '%1%'", linkPath)
         });

@@ -237,8 +235,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
     /* Atomically replace the old file with the new hard link. */
     if (rename(tempLink.c_str(), path.c_str()) == -1) {
         if (unlink(tempLink.c_str()) == -1)
-            logError(
-                ErrorInfo {
+            logError({
                 .name = "Unlink error",
                 .hint = hintfmt("unable to unlink '%1%'", tempLink)
             });
@@ -73,6 +73,11 @@ const size_t storePathHashLen = 32; // i.e. 160 bits
 /* Extension of derivations in the Nix store. */
 const std::string drvExtension = ".drv";

+enum struct FileIngestionMethod : uint8_t {
+    Flat = false,
+    Recursive = true
+};
+
 struct StorePathWithOutputs
 {
     StorePath path;
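The enum deliberately keeps `Flat = false` and `Recursive = true`, so the bit pattern (and thus the daemon wire encoding) is identical to the old `bool recursive` flag; only the call sites become self-documenting. A tiny runnable sketch of the conversion this enables:

    #include <cstdint>

    enum struct FileIngestionMethod : uint8_t { Flat = false, Recursive = true };

    int main() {
        bool legacyRecursiveFlag = true;
        // Same bit pattern as the old bool, so existing protocols don't change:
        auto method = FileIngestionMethod { legacyRecursiveFlag };
        return method == FileIngestionMethod::Recursive ? 0 : 1;
    }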
@@ -484,7 +484,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,


 StorePath RemoteStore::addToStore(const string & name, const Path & _srcPath,
-    bool recursive, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
+    FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
 {
     if (repair) throw Error("repairing is not supported when building through the Nix daemon");

@@ -492,9 +492,11 @@ StorePath RemoteStore::addToStore(const string & name, const Path & _srcPath,

     Path srcPath(absPath(_srcPath));

-    conn->to << wopAddToStore << name
-        << ((hashAlgo == htSHA256 && recursive) ? 0 : 1) /* backwards compatibility hack */
-        << (recursive ? 1 : 0)
+    conn->to
+        << wopAddToStore
+        << name
+        << ((hashAlgo == htSHA256 && method == FileIngestionMethod::Recursive) ? 0 : 1) /* backwards compatibility hack */
+        << (method == FileIngestionMethod::Recursive ? 1 : 0)
         << printHashType(hashAlgo);

     try {
@@ -65,7 +65,7 @@ public:
     std::shared_ptr<FSAccessor> accessor) override;

     StorePath addToStore(const string & name, const Path & srcPath,
-        bool recursive = true, HashType hashAlgo = htSHA256,
+        FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256,
         PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair) override;

     StorePath addTextToStore(const string & name, const string & s,
@@ -172,19 +172,22 @@ static std::string makeType(


 StorePath Store::makeFixedOutputPath(
-    bool recursive,
+    FileIngestionMethod recursive,
     const Hash & hash,
     std::string_view name,
     const StorePathSet & references,
     bool hasSelfReference) const
 {
-    if (hash.type == htSHA256 && recursive) {
+    if (hash.type == htSHA256 && recursive == FileIngestionMethod::Recursive) {
         return makeStorePath(makeType(*this, "source", references, hasSelfReference), hash, name);
     } else {
         assert(references.empty());
-        return makeStorePath("output:out", hashString(htSHA256,
-            "fixed:out:" + (recursive ? (string) "r:" : "") +
-            hash.to_string(Base16) + ":"), name);
+        return makeStorePath("output:out",
+            hashString(htSHA256,
+                "fixed:out:"
+                + (recursive == FileIngestionMethod::Recursive ? (string) "r:" : "")
+                + hash.to_string(Base16) + ":"),
+            name);
     }
 }

@@ -201,10 +204,12 @@ StorePath Store::makeTextPath(std::string_view name, const Hash & hash,


 std::pair<StorePath, Hash> Store::computeStorePathForPath(std::string_view name,
-    const Path & srcPath, bool recursive, HashType hashAlgo, PathFilter & filter) const
+    const Path & srcPath, FileIngestionMethod method, HashType hashAlgo, PathFilter & filter) const
 {
-    Hash h = recursive ? hashPath(hashAlgo, srcPath, filter).first : hashFile(hashAlgo, srcPath);
-    return std::make_pair(makeFixedOutputPath(recursive, h, name), h);
+    Hash h = method == FileIngestionMethod::Recursive
+        ? hashPath(hashAlgo, srcPath, filter).first
+        : hashFile(hashAlgo, srcPath);
+    return std::make_pair(makeFixedOutputPath(method, h, name), h);
 }

@@ -786,8 +791,8 @@ bool ValidPathInfo::isContentAddressed(const Store & store) const
     }

     else if (hasPrefix(ca, "fixed:")) {
-        bool recursive = ca.compare(6, 2, "r:") == 0;
-        Hash hash(std::string(ca, recursive ? 8 : 6));
+        FileIngestionMethod recursive { ca.compare(6, 2, "r:") == 0 };
+        Hash hash(std::string(ca, recursive == FileIngestionMethod::Recursive ? 8 : 6));
         auto refs = cloneStorePathSet(references);
         bool hasSelfReference = false;
         if (refs.count(path)) {

@@ -831,27 +836,14 @@ Strings ValidPathInfo::shortRefs() const
 }


-std::string makeFixedOutputCA(bool recursive, const Hash & hash)
+std::string makeFixedOutputCA(FileIngestionMethod recursive, const Hash & hash)
 {
-    return "fixed:" + (recursive ? (std::string) "r:" : "") + hash.to_string();
+    return "fixed:"
+        + (recursive == FileIngestionMethod::Recursive ? (std::string) "r:" : "")
+        + hash.to_string();
 }

-
-void Store::addToStore(const ValidPathInfo & info, Source & narSource,
-    RepairFlag repair, CheckSigsFlag checkSigs,
-    std::shared_ptr<FSAccessor> accessor)
-{
-    addToStore(info, make_ref<std::string>(narSource.drain()), repair, checkSigs, accessor);
-}
-
-void Store::addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
-    RepairFlag repair, CheckSigsFlag checkSigs,
-    std::shared_ptr<FSAccessor> accessor)
-{
-    StringSource source(*nar);
-    addToStore(info, source, repair, checkSigs, accessor);
-}

 }
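Worth spelling out, since two hunks above encode it: the content-addressability assertion for fixed outputs is `fixed:`, then `r:` only for recursive (NAR) ingestion, then the hash; `makeFixedOutputPath` runs the analogous `fixed:out:[r:]<base16-hash>:` string through sha256 for non-"source" paths. A hedged standalone sketch of just the CA string (`hashStr` stands in for `Hash::to_string()`):

    #include <cassert>
    #include <cstdint>
    #include <string>

    enum struct FileIngestionMethod : uint8_t { Flat = false, Recursive = true };

    // Mirrors makeFixedOutputCA above.
    std::string makeFixedOutputCA(FileIngestionMethod method, const std::string & hashStr) {
        return "fixed:"
            + std::string(method == FileIngestionMethod::Recursive ? "r:" : "")
            + hashStr;
    }

    int main() {
        assert(makeFixedOutputCA(FileIngestionMethod::Recursive, "sha256:abc") == "fixed:r:sha256:abc");
        assert(makeFixedOutputCA(FileIngestionMethod::Flat, "sha256:abc") == "fixed:sha256:abc");
    }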
@@ -44,7 +44,6 @@ enum CheckSigsFlag : bool { NoCheckSigs = false, CheckSigs = true };
 enum SubstituteFlag : bool { NoSubstitute = false, Substitute = true };
 enum AllowInvalidFlag : bool { DisallowInvalid = false, AllowInvalid = true };
-
 /* Magic header of exportPath() output (obsolete). */
 const uint32_t exportMagic = 0x4558494e;

@@ -347,7 +346,7 @@ public:
     StorePath makeOutputPath(const string & id,
         const Hash & hash, std::string_view name) const;

-    StorePath makeFixedOutputPath(bool recursive,
+    StorePath makeFixedOutputPath(FileIngestionMethod method,
         const Hash & hash, std::string_view name,
         const StorePathSet & references = {},
         bool hasSelfReference = false) const;

@@ -359,7 +358,7 @@ public:
        store path to which srcPath is to be copied. Returns the store
        path and the cryptographic hash of the contents of srcPath. */
     std::pair<StorePath, Hash> computeStorePathForPath(std::string_view name,
-        const Path & srcPath, bool recursive = true,
+        const Path & srcPath, FileIngestionMethod method = FileIngestionMethod::Recursive,
         HashType hashAlgo = htSHA256, PathFilter & filter = defaultPathFilter) const;

     /* Preparatory part of addTextToStore().

@@ -451,24 +450,19 @@ public:
     /* Import a path into the store. */
     virtual void addToStore(const ValidPathInfo & info, Source & narSource,
         RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs,
-        std::shared_ptr<FSAccessor> accessor = 0);
-
-    // FIXME: remove
-    virtual void addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
-        RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs,
-        std::shared_ptr<FSAccessor> accessor = 0);
+        std::shared_ptr<FSAccessor> accessor = 0) = 0;

     /* Copy the contents of a path to the store and register the
        validity the resulting path. The resulting path is returned.
        The function object `filter' can be used to exclude files (see
        libutil/archive.hh). */
     virtual StorePath addToStore(const string & name, const Path & srcPath,
-        bool recursive = true, HashType hashAlgo = htSHA256,
+        FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256,
         PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair) = 0;

     // FIXME: remove?
     virtual StorePath addToStoreFromDump(const string & dump, const string & name,
-        bool recursive = true, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair)
+        FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair)
     {
         throw Error("addToStoreFromDump() is not supported by this store");
     }

@@ -851,7 +845,7 @@ std::optional<ValidPathInfo> decodeValidPathInfo(

 /* Compute the content-addressability assertion (ValidPathInfo::ca)
    for paths created by makeFixedOutputPath() / addToStore(). */
-std::string makeFixedOutputCA(bool recursive, const Hash & hash);
+std::string makeFixedOutputCA(FileIngestionMethod method, const Hash & hash);


 /* Split URI into protocol+hierarchy part and its parameter set. */
@@ -65,11 +65,7 @@ void Config::getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly)
         res.emplace(opt.first, SettingInfo{opt.second.setting->to_string(), opt.second.setting->description});
 }

-void AbstractConfig::applyConfigFile(const Path & path)
-{
-    try {
-        string contents = readFile(path);
-
+void AbstractConfig::applyConfig(const std::string & contents, const std::string & path) {
     unsigned int pos = 0;

     while (pos < contents.size()) {

@@ -119,6 +115,13 @@ void AbstractConfig::applyConfigFile(const Path & path)

         set(name, concatStringsSep(" ", Strings(i, tokens.end()))); // FIXME: slow
     };
 }

+void AbstractConfig::applyConfigFile(const Path & path)
+{
+    try {
+        string contents = readFile(path);
+        applyConfig(contents, path);
+    } catch (SysError &) { }
+}
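The point of splitting `applyConfig` out of `applyConfigFile` is that the parser can now be driven from a string, which is exactly what the new unit tests below do. A usage sketch modeled on those tests (assumes the nix `config.hh` API):

    #include "config.hh"
    #include <cassert>

    using namespace nix;

    int main() {
        Config config;
        Setting<std::string> setting{&config, "default", "name-of-the-setting", "description"};

        // nix.conf-style "name = value" lines; trailing comments are stripped.
        config.applyConfig("name-of-the-setting = value-from-file # comment\n");

        assert(setting.get() == "value-from-file");
    }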
@@ -7,6 +7,38 @@

 namespace nix {

+/**
+ * The Config class provides Nix runtime configurations.
+ *
+ * What is a Configuration?
+ *   A collection of uniquely named Settings.
+ *
+ * What is a Setting?
+ *   Each property that you can set in a configuration corresponds to a
+ *   `Setting`. A setting records value and description of a property
+ *   with a default and optional aliases.
+ *
+ * A valid configuration consists of settings that are registered to a
+ * `Config` object instance:
+ *
+ *   Config config;
+ *   Setting<std::string> systemSetting{&config, "x86_64-linux", "system", "the current system"};
+ *
+ * The above creates a `Config` object and registers a setting called "system"
+ * via the variable `systemSetting` with it. The setting defaults to the string
+ * "x86_64-linux", its description is "the current system". All of the
+ * registered settings can then be accessed as shown below:
+ *
+ *   std::map<std::string, Config::SettingInfo> settings;
+ *   config.getSettings(settings);
+ *   config["system"].description == "the current system"
+ *   config["system"].value == "x86_64-linux"
+ *
+ * The above retrieves all currently known settings from the `Config` object
+ * and adds them to the `settings` map.
+ */

 class Args;
 class AbstractSetting;
 class JSONPlaceholder;

@@ -23,6 +55,10 @@ protected:

 public:

+    /**
+     * Sets the value referenced by `name` to `value`. Returns true if the
+     * setting is known, false otherwise.
+     */
     virtual bool set(const std::string & name, const std::string & value) = 0;

     struct SettingInfo

@@ -31,18 +67,52 @@ public:
         std::string description;
     };

+    /**
+     * Adds the currently known settings to the given result map `res`.
+     * - res: map to store settings in
+     * - overridenOnly: when set to true only overridden settings will be added to `res`
+     */
     virtual void getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly = false) = 0;

+    /**
+     * Parses the configuration in `contents` and applies it
+     * - contents: configuration contents to be parsed and applied
+     * - path: location of the configuration file
+     */
+    void applyConfig(const std::string & contents, const std::string & path = "<unknown>");

+    /**
+     * Applies a nix configuration file
+     * - path: the location of the config file to apply
+     */
     void applyConfigFile(const Path & path);

+    /**
+     * Resets the `overridden` flag of all Settings
+     */
     virtual void resetOverriden() = 0;

+    /**
+     * Outputs all settings to JSON
+     * - out: JSONObject to write the configuration to
+     */
     virtual void toJSON(JSONObject & out) = 0;

+    /**
+     * Converts settings to `Args` to be used on the command line interface
+     * - args: args to write to
+     * - category: category of the settings
+     */
     virtual void convertToArgs(Args & args, const std::string & category) = 0;

+    /**
+     * Logs a warning for each unregistered setting
+     */
     void warnUnknownSettings();

+    /**
+     * Re-applies all previously attempted changes to unknown settings
+     */
     void reapplyUnknownSettings();
 };
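A compilable version of the example in the new doc comment, for concreteness (again assuming the nix headers; `SettingInfo` exposes `value` and `description` as documented):

    #include "config.hh"
    #include <iostream>
    #include <map>

    using namespace nix;

    int main() {
        Config config;
        Setting<std::string> systemSetting{&config, "x86_64-linux", "system", "the current system"};

        std::map<std::string, Config::SettingInfo> settings;
        config.getSettings(settings);

        for (auto & [name, info] : settings)
            std::cout << name << " = " << info.value << "   # " << info.description << "\n";
    }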
@@ -10,13 +10,16 @@ namespace nix {

 const std::string nativeSystem = SYSTEM;

+// addPrefix is used for show-trace.  Strings added with addPrefix
+// will print ahead of the error itself.
 BaseError & BaseError::addPrefix(const FormatOrString & fs)
 {
     prefix_ = fs.s + prefix_;
     return *this;
 }

+// c++ std::exception descendants must have a 'const char* what()' function.
+// This stringifies the error and caches it for use by what(), or similarly by msg().
 const string& BaseError::calcWhat() const
 {
     if (what_.has_value())

@@ -124,25 +127,25 @@ void getCodeLines(NixCode &nixCode)
     }
 }

+// if nixCode contains lines of code, print them to the ostream, indicating the error column.
 void printCodeLines(std::ostream &out, const string &prefix, const NixCode &nixCode)
 {
     // previous line of code.
     if (nixCode.prevLineOfCode.has_value()) {
-        out << fmt("%1% %|2$5d|| %3%",
+        out << std::endl
+            << fmt("%1% %|2$5d|| %3%",
                 prefix,
                 (nixCode.errPos.line - 1),
-                *nixCode.prevLineOfCode)
-            << std::endl;
+                *nixCode.prevLineOfCode);
     }

     if (nixCode.errLineOfCode.has_value()) {
         // line of code containing the error.
-        out << fmt("%1% %|2$5d|| %3%",
+        out << std::endl
+            << fmt("%1% %|2$5d|| %3%",
                 prefix,
                 (nixCode.errPos.line),
-                *nixCode.errLineOfCode)
-            << std::endl;
+                *nixCode.errLineOfCode);
         // error arrows for the column range.
         if (nixCode.errPos.column > 0) {
             int start = nixCode.errPos.column;

@@ -153,20 +156,21 @@ void printCodeLines(std::ostream &out, const string &prefix, const NixCode &nixCode)

             std::string arrows("^");

-            out << fmt("%1% |%2%" ANSI_RED "%3%" ANSI_NORMAL,
+            out << std::endl
+                << fmt("%1% |%2%" ANSI_RED "%3%" ANSI_NORMAL,
                     prefix,
                     spaces,
-                    arrows) << std::endl;
+                    arrows);
         }
     }

     // next line of code.
     if (nixCode.nextLineOfCode.has_value()) {
-        out << fmt("%1% %|2$5d|| %3%",
+        out << std::endl
+            << fmt("%1% %|2$5d|| %3%",
                 prefix,
                 (nixCode.errPos.line + 1),
-                *nixCode.nextLineOfCode)
-            << std::endl;
+                *nixCode.nextLineOfCode);
     }
 }

@@ -239,17 +243,15 @@ std::ostream& operator<<(std::ostream &out, const ErrorInfo &einfo)
             levelString,
             einfo.name,
             dashes,
-            einfo.programName.value_or(""))
-            << std::endl;
+            einfo.programName.value_or(""));
     else
         out << fmt("%1%%2%" ANSI_BLUE " -----%3% %4%" ANSI_NORMAL,
             prefix,
             levelString,
             dashes,
-            einfo.programName.value_or(""))
-            << std::endl;
+            einfo.programName.value_or(""));

     // filename, line, column.
     bool nl = false;  // intersperse newline between sections.
     if (einfo.nixCode.has_value()) {
         switch (einfo.nixCode->errPos.origin) {
             case foFile: {

@@ -273,30 +275,36 @@ std::ostream& operator<<(std::ostream &out, const ErrorInfo &einfo)
             default:
                 throw Error("invalid FileOrigin in errPos");
         }
+        nl = true;
     }

     // description
     if (einfo.description != "") {
-        out << prefix << einfo.description << std::endl;
-        out << prefix << std::endl;
+        if (nl)
+            out << std::endl << prefix;
+        out << std::endl << prefix << einfo.description;
+        nl = true;
     }

     if (einfo.nixCode.has_value()) {
         NixCode nixcode = *einfo.nixCode;
         getCodeLines(nixcode);

         // lines of code.
         if (nixcode.errLineOfCode.has_value()) {
+            if (nl)
+                out << std::endl << prefix;
             printCodeLines(out, prefix, nixcode);
-            out << prefix << std::endl;
+            nl = true;
         }
     }

     // hint
     if (einfo.hint.has_value()) {
-        out << prefix << *einfo.hint << std::endl;
-        out << prefix << std::endl;
+        if (nl)
+            out << std::endl << prefix;
+        out << std::endl << prefix << *einfo.hint;
+        nl = true;
     }

     return out;
@@ -22,6 +22,23 @@

 namespace nix {

+/*
+   This file defines two main structs/classes used in nix error handling.
+
+   ErrorInfo provides a standard payload of error information, with conversion to string
+   happening in the logger rather than at the call site.
+
+   BaseError is the ancestor of nix specific exceptions (and Interrupted), and contains
+   an ErrorInfo.
+
+   ErrorInfo structs are sent to the logger as part of an exception, or directly with the
+   logError or logWarning macros.
+
+   See the error-demo.cc program for usage examples.
+*/
+
 typedef enum {
     lvlError = 0,
     lvlWarn,

@@ -38,7 +55,7 @@ typedef enum {
     foString
 } FileOrigin;

-
+// ErrPos indicates the location of an error in a nix file.
 struct ErrPos {
     int line = 0;
     int column = 0;

@@ -50,6 +67,7 @@ struct ErrPos {
         return line != 0;
     }

+    // convert from the Pos struct, found in libexpr.
     template <class P>
     ErrPos& operator=(const P &pos)
     {

@@ -74,8 +92,6 @@ struct NixCode {
     std::optional<string> nextLineOfCode;
 };

-// -------------------------------------------------
-// ErrorInfo.
 struct ErrorInfo {
     Verbosity level;
     string name;
@@ -251,8 +251,7 @@ bool handleJSONLogMessage(const std::string & msg,
     }

     } catch (std::exception & e) {
-        logError(
-            ErrorInfo {
+        logError({
             .name = "Json log message",
             .hint = hintfmt("bad log message from builder: %s", e.what())
         });
@@ -150,9 +150,23 @@ bool handleJSONLogMessage(const std::string & msg,

 extern Verbosity verbosity; /* suppress msgs > this */

-/* Print a message if the current log level is at least the specified
-   level. Note that this has to be implemented as a macro to ensure
-   that the arguments are evaluated lazily. */
+/* Print a message with the standard ErrorInfo format.
+   In general, use these 'log' macros for reporting problems that may require user
+   intervention or that need more explanation. Use the 'print' macros for more
+   lightweight status messages. */
+#define logErrorInfo(level, errorInfo...) \
+    do { \
+        if (level <= nix::verbosity) { \
+            logger->logEI(level, errorInfo); \
+        } \
+    } while (0)
+
+#define logError(errorInfo...) logErrorInfo(lvlError, errorInfo)
+#define logWarning(errorInfo...) logErrorInfo(lvlWarn, errorInfo)
+
+/* Print a string message if the current log level is at least the specified
+   level. Note that this has to be implemented as a macro to ensure that the
+   arguments are evaluated lazily. */
 #define printMsg(level, args...) \
     do { \
         if (level <= nix::verbosity) { \

@@ -166,18 +180,7 @@ extern Verbosity verbosity; /* suppress msgs > this */
 #define debug(args...) printMsg(lvlDebug, args)
 #define vomit(args...) printMsg(lvlVomit, args)

-#define logErrorInfo(level, errorInfo...) \
-    do { \
-        if (level <= nix::verbosity) { \
-            logger->logEI(level, errorInfo); \
-        } \
-    } while (0)
-
-#define logError(errorInfo...) logErrorInfo(lvlError, errorInfo)
-#define logWarning(errorInfo...) logErrorInfo(lvlWarn, errorInfo)
-
-
+
 /* if verbosity >= lvlWarn, print a message with a yellow 'warning:' prefix. */
 template<typename... Args>
 inline void warn(const std::string & fs, const Args & ... args)
 {
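Taken together with the `ErrorInfo` changes, the moved macros give call sites the shape used throughout this commit. A usage sketch (assumes the nix headers; note that because the check against `verbosity` happens inside the macro, the braced `ErrorInfo` — including the `hintfmt` call — is only constructed when the message will actually be logged):

    #include "logging.hh"
    #include <string>

    using namespace nix;

    void example(const std::string & path) {
        logError({
            .name = "Corrupted path",
            .hint = hintfmt("path '%s' is corrupted or missing!", path)
        });

        logWarning({
            .name = "Suspicious file",
            .hint = hintfmt("skipping suspicious writable file '%1%'", path)
        });
    }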
@@ -1,5 +1,6 @@
 #pragma once

+#include <cassert>
 #include <map>
 #include <list>
 #include <optional>
src/libutil/tests/config.cc (new file, 264 lines)
@@ -0,0 +1,264 @@
+#include "json.hh"
+#include "config.hh"
+#include "args.hh"
+
+#include <sstream>
+#include <gtest/gtest.h>
+
+namespace nix {
+
+    /* ----------------------------------------------------------------------------
+     * Config
+     * --------------------------------------------------------------------------*/
+
+    TEST(Config, setUndefinedSetting) {
+        Config config;
+        ASSERT_EQ(config.set("undefined-key", "value"), false);
+    }
+
+    TEST(Config, setDefinedSetting) {
+        Config config;
+        std::string value;
+        Setting<std::string> foo{&config, value, "name-of-the-setting", "description"};
+        ASSERT_EQ(config.set("name-of-the-setting", "value"), true);
+    }
+
+    TEST(Config, getDefinedSetting) {
+        Config config;
+        std::string value;
+        std::map<std::string, Config::SettingInfo> settings;
+        Setting<std::string> foo{&config, value, "name-of-the-setting", "description"};
+
+        config.getSettings(settings, /* overridenOnly = */ false);
+        const auto iter = settings.find("name-of-the-setting");
+        ASSERT_NE(iter, settings.end());
+        ASSERT_EQ(iter->second.value, "");
+        ASSERT_EQ(iter->second.description, "description");
+    }
+
+    TEST(Config, getDefinedOverridenSettingNotSet) {
+        Config config;
+        std::string value;
+        std::map<std::string, Config::SettingInfo> settings;
+        Setting<std::string> foo{&config, value, "name-of-the-setting", "description"};
+
+        config.getSettings(settings, /* overridenOnly = */ true);
+        const auto e = settings.find("name-of-the-setting");
+        ASSERT_EQ(e, settings.end());
+    }
+
+    TEST(Config, getDefinedSettingSet1) {
+        Config config;
+        std::string value;
+        std::map<std::string, Config::SettingInfo> settings;
+        Setting<std::string> setting{&config, value, "name-of-the-setting", "description"};
+
+        setting.assign("value");
+
+        config.getSettings(settings, /* overridenOnly = */ false);
+        const auto iter = settings.find("name-of-the-setting");
+        ASSERT_NE(iter, settings.end());
+        ASSERT_EQ(iter->second.value, "value");
+        ASSERT_EQ(iter->second.description, "description");
+    }
+
+    TEST(Config, getDefinedSettingSet2) {
+        Config config;
+        std::map<std::string, Config::SettingInfo> settings;
+        Setting<std::string> setting{&config, "", "name-of-the-setting", "description"};
+
+        ASSERT_TRUE(config.set("name-of-the-setting", "value"));
+
+        config.getSettings(settings, /* overridenOnly = */ false);
+        const auto e = settings.find("name-of-the-setting");
+        ASSERT_NE(e, settings.end());
+        ASSERT_EQ(e->second.value, "value");
+        ASSERT_EQ(e->second.description, "description");
+    }
+
+    TEST(Config, addSetting) {
+        class TestSetting : public AbstractSetting {
+            public:
+            TestSetting() : AbstractSetting("test", "test", {}) {}
+            void set(const std::string & value) {}
+            std::string to_string() const { return {}; }
+        };
+
+        Config config;
+        TestSetting setting;
+
+        ASSERT_FALSE(config.set("test", "value"));
+        config.addSetting(&setting);
+        ASSERT_TRUE(config.set("test", "value"));
+    }
+
+    TEST(Config, withInitialValue) {
+        const StringMap initials = {
+            { "key", "value" },
+        };
+        Config config(initials);
+
+        {
+            std::map<std::string, Config::SettingInfo> settings;
+            config.getSettings(settings, /* overridenOnly = */ false);
+            ASSERT_EQ(settings.find("key"), settings.end());
+        }
+
+        Setting<std::string> setting{&config, "default-value", "key", "description"};
+
+        {
+            std::map<std::string, Config::SettingInfo> settings;
+            config.getSettings(settings, /* overridenOnly = */ false);
+            ASSERT_EQ(settings["key"].value, "value");
+        }
+    }
+
+    TEST(Config, resetOverriden) {
+        Config config;
+        config.resetOverriden();
+    }
+
+    TEST(Config, resetOverridenWithSetting) {
+        Config config;
+        Setting<std::string> setting{&config, "", "name-of-the-setting", "description"};
+
+        {
+            std::map<std::string, Config::SettingInfo> settings;
+
+            setting.set("foo");
+            ASSERT_EQ(setting.get(), "foo");
+            config.getSettings(settings, /* overridenOnly = */ true);
+            ASSERT_TRUE(settings.empty());
+        }
+
+        {
+            std::map<std::string, Config::SettingInfo> settings;
+
+            setting.override("bar");
+            ASSERT_TRUE(setting.overriden);
+            ASSERT_EQ(setting.get(), "bar");
+            config.getSettings(settings, /* overridenOnly = */ true);
+            ASSERT_FALSE(settings.empty());
+        }
+
+        {
+            std::map<std::string, Config::SettingInfo> settings;
+
+            config.resetOverriden();
+            ASSERT_FALSE(setting.overriden);
+            config.getSettings(settings, /* overridenOnly = */ true);
+            ASSERT_TRUE(settings.empty());
+        }
+    }
+
+    TEST(Config, toJSONOnEmptyConfig) {
+        std::stringstream out;
+        { // Scoped to force the destructor of JSONObject to write the final `}`
+            JSONObject obj(out);
+            Config config;
+            config.toJSON(obj);
+        }
+
+        ASSERT_EQ(out.str(), "{}");
+    }
+
+    TEST(Config, toJSONOnNonEmptyConfig) {
+        std::stringstream out;
+        { // Scoped to force the destructor of JSONObject to write the final `}`
+            JSONObject obj(out);
+
+            Config config;
+            std::map<std::string, Config::SettingInfo> settings;
+            Setting<std::string> setting{&config, "", "name-of-the-setting", "description"};
+            setting.assign("value");
+
+            config.toJSON(obj);
+        }
+        ASSERT_EQ(out.str(), R"#({"name-of-the-setting":{"description":"description","value":"value"}})#");
+    }
+
+    TEST(Config, setSettingAlias) {
+        Config config;
+        Setting<std::string> setting{&config, "", "some-int", "best number", { "another-int" }};
+        ASSERT_TRUE(config.set("some-int", "1"));
+        ASSERT_EQ(setting.get(), "1");
+        ASSERT_TRUE(config.set("another-int", "2"));
+        ASSERT_EQ(setting.get(), "2");
+        ASSERT_TRUE(config.set("some-int", "3"));
+        ASSERT_EQ(setting.get(), "3");
+    }
+
+    /* FIXME: The reapplyUnknownSettings method doesn't seem to do anything
+     * useful (these days). Whenever we add a new setting to Config the
+     * unknown settings are always considered. In which case is this function
+     * actually useful? Is there some way to register a Setting without calling
+     * addSetting? */
+    TEST(Config, DISABLED_reapplyUnknownSettings) {
+        Config config;
+        ASSERT_FALSE(config.set("name-of-the-setting", "unknownvalue"));
+        Setting<std::string> setting{&config, "default", "name-of-the-setting", "description"};
+        ASSERT_EQ(setting.get(), "default");
+        config.reapplyUnknownSettings();
+        ASSERT_EQ(setting.get(), "unknownvalue");
+    }
+
+    TEST(Config, applyConfigEmpty) {
+        Config config;
+        std::map<std::string, Config::SettingInfo> settings;
+        config.applyConfig("");
+        config.getSettings(settings);
+        ASSERT_TRUE(settings.empty());
+    }
+
+    TEST(Config, applyConfigEmptyWithComment) {
+        Config config;
+        std::map<std::string, Config::SettingInfo> settings;
+        config.applyConfig("# just a comment");
+        config.getSettings(settings);
+        ASSERT_TRUE(settings.empty());
+    }
+
+    TEST(Config, applyConfigAssignment) {
+        Config config;
+        std::map<std::string, Config::SettingInfo> settings;
+        Setting<std::string> setting{&config, "", "name-of-the-setting", "description"};
+        config.applyConfig(
+            "name-of-the-setting = value-from-file #useful comment\n"
+            "# name-of-the-setting = foo\n"
+        );
+        config.getSettings(settings);
+        ASSERT_FALSE(settings.empty());
+        ASSERT_EQ(settings["name-of-the-setting"].value, "value-from-file");
+    }
+
+    TEST(Config, applyConfigWithReassignedSetting) {
+        Config config;
+        std::map<std::string, Config::SettingInfo> settings;
+        Setting<std::string> setting{&config, "", "name-of-the-setting", "description"};
+        config.applyConfig(
+            "name-of-the-setting = first-value\n"
+            "name-of-the-setting = second-value\n"
+        );
+        config.getSettings(settings);
+        ASSERT_FALSE(settings.empty());
+        ASSERT_EQ(settings["name-of-the-setting"].value, "second-value");
+    }
+
+    TEST(Config, applyConfigFailsOnMissingIncludes) {
+        Config config;
+        std::map<std::string, Config::SettingInfo> settings;
+        Setting<std::string> setting{&config, "", "name-of-the-setting", "description"};
+
+        ASSERT_THROW(config.applyConfig(
+            "name-of-the-setting = value-from-file\n"
+            "# name-of-the-setting = foo\n"
+            "include /nix/store/does/not/exist.nix"
+        ), Error);
+    }
+
+    TEST(Config, applyConfigInvalidThrows) {
+        Config config;
+        ASSERT_THROW(config.applyConfig("value == key"), UsageError);
+        ASSERT_THROW(config.applyConfig("value "), UsageError);
+    }
+}
src/libutil/tests/hash.cc (new file, 80 lines)
@@ -0,0 +1,80 @@
+#include "hash.hh"
+#include <gtest/gtest.h>
+
+namespace nix {
+
+    /* ----------------------------------------------------------------------------
+     * hashString
+     * --------------------------------------------------------------------------*/
+
+    TEST(hashString, testKnownMD5Hashes1) {
+        // values taken from: https://tools.ietf.org/html/rfc1321
+        auto s1 = "";
+        auto hash = hashString(HashType::htMD5, s1);
+        ASSERT_EQ(hash.to_string(Base::Base16), "md5:d41d8cd98f00b204e9800998ecf8427e");
+    }
+
+    TEST(hashString, testKnownMD5Hashes2) {
+        // values taken from: https://tools.ietf.org/html/rfc1321
+        auto s2 = "abc";
+        auto hash = hashString(HashType::htMD5, s2);
+        ASSERT_EQ(hash.to_string(Base::Base16), "md5:900150983cd24fb0d6963f7d28e17f72");
+    }
+
+    TEST(hashString, testKnownSHA1Hashes1) {
+        // values taken from: https://tools.ietf.org/html/rfc3174
+        auto s = "abc";
+        auto hash = hashString(HashType::htSHA1, s);
+        ASSERT_EQ(hash.to_string(Base::Base16),"sha1:a9993e364706816aba3e25717850c26c9cd0d89d");
+    }
+
+    TEST(hashString, testKnownSHA1Hashes2) {
+        // values taken from: https://tools.ietf.org/html/rfc3174
+        auto s = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq";
+        auto hash = hashString(HashType::htSHA1, s);
+        ASSERT_EQ(hash.to_string(Base::Base16),"sha1:84983e441c3bd26ebaae4aa1f95129e5e54670f1");
+    }
+
+    TEST(hashString, testKnownSHA256Hashes1) {
+        // values taken from: https://tools.ietf.org/html/rfc4634
+        auto s = "abc";
+
+        auto hash = hashString(HashType::htSHA256, s);
+        ASSERT_EQ(hash.to_string(Base::Base16),
+                "sha256:ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad");
+    }
+
+    TEST(hashString, testKnownSHA256Hashes2) {
+        // values taken from: https://tools.ietf.org/html/rfc4634
+        auto s = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq";
+        auto hash = hashString(HashType::htSHA256, s);
+        ASSERT_EQ(hash.to_string(Base::Base16),
+                "sha256:248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1");
+    }
+
+    TEST(hashString, testKnownSHA512Hashes1) {
+        // values taken from: https://tools.ietf.org/html/rfc4634
+        auto s = "abc";
+        auto hash = hashString(HashType::htSHA512, s);
+        ASSERT_EQ(hash.to_string(Base::Base16),
+                "sha512:ddaf35a193617abacc417349ae20413112e6fa4e89a9"
+                "7ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd"
+                "454d4423643ce80e2a9ac94fa54ca49f");
+    }
+
+    TEST(hashString, testKnownSHA512Hashes2) {
+        // values taken from: https://tools.ietf.org/html/rfc4634
+        auto s = "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu";
+
+        auto hash = hashString(HashType::htSHA512, s);
+        ASSERT_EQ(hash.to_string(Base::Base16),
+                "sha512:8e959b75dae313da8cf4f72814fc143f8f7779c6eb9f7fa1"
+                "7299aeadb6889018501d289e4900f7e4331b99dec4b5433a"
+                "c7d329eeb6dd26545e96e55b874be909");
+    }
+
+    TEST(hashString, hashingWithUnknownAlgoExits) {
+        auto s = "unknown";
+        ASSERT_DEATH(hashString(HashType::htUnknown, s), "");
+    }
+}
src/libutil/tests/json.cc (new file, 193 lines)
@ -0,0 +1,193 @@
|
|||
#include "json.hh"
|
||||
#include <gtest/gtest.h>
|
||||
#include <sstream>
|
||||
|
||||
namespace nix {
|
||||
|
||||
/* ----------------------------------------------------------------------------
|
||||
* toJSON
|
||||
* --------------------------------------------------------------------------*/
|
||||
|
||||
TEST(toJSON, quotesCharPtr) {
|
||||
const char* input = "test";
|
||||
std::stringstream out;
|
||||
toJSON(out, input);
|
||||
|
||||
ASSERT_EQ(out.str(), "\"test\"");
|
||||
}
|
||||
|
||||
TEST(toJSON, quotesStdString) {
|
||||
std::string input = "test";
|
||||
std::stringstream out;
|
||||
toJSON(out, input);
|
||||
|
||||
ASSERT_EQ(out.str(), "\"test\"");
|
||||
}
|
||||
|
||||
TEST(toJSON, convertsNullptrtoNull) {
|
||||
auto input = nullptr;
|
||||
std::stringstream out;
|
||||
toJSON(out, input);
|
||||
|
||||
ASSERT_EQ(out.str(), "null");
|
||||
}
|
||||
|
||||
TEST(toJSON, convertsNullToNull) {
|
||||
const char* input = 0;
|
||||
std::stringstream out;
|
||||
toJSON(out, input);
|
||||
|
||||
ASSERT_EQ(out.str(), "null");
|
||||
}
|
||||
|
||||
|
||||
TEST(toJSON, convertsFloat) {
|
||||
auto input = 1.024f;
|
||||
std::stringstream out;
|
||||
toJSON(out, input);
|
||||
|
||||
ASSERT_EQ(out.str(), "1.024");
|
||||
}
|
||||
|
||||
TEST(toJSON, convertsDouble) {
|
||||
const double input = 1.024;
|
||||
std::stringstream out;
|
||||
toJSON(out, input);
|
||||
|
||||
ASSERT_EQ(out.str(), "1.024");
|
||||
}
|
||||
|
||||
TEST(toJSON, convertsBool) {
|
||||
auto input = false;
|
||||
std::stringstream out;
|
||||
toJSON(out, input);
|
||||
|
||||
ASSERT_EQ(out.str(), "false");
|
||||
}
|
||||
|
||||
TEST(toJSON, quotesTab) {
|
||||
std::stringstream out;
|
||||
toJSON(out, "\t");
|
||||
|
||||
ASSERT_EQ(out.str(), "\"\\t\"");
|
||||
}
|
||||
|
||||
TEST(toJSON, quotesNewline) {
|
||||
std::stringstream out;
|
||||
toJSON(out, "\n");
|
||||
|
||||
ASSERT_EQ(out.str(), "\"\\n\"");
|
||||
}
|
||||
|
||||
TEST(toJSON, quotesCreturn) {
|
||||
std::stringstream out;
|
||||
toJSON(out, "\r");
|
||||
|
||||
ASSERT_EQ(out.str(), "\"\\r\"");
|
||||
}
|
||||
|
||||
TEST(toJSON, quotesCreturnNewLine) {
|
||||
std::stringstream out;
|
||||
toJSON(out, "\r\n");
|
||||
|
||||
ASSERT_EQ(out.str(), "\"\\r\\n\"");
|
||||
}
|
||||
|
||||
TEST(toJSON, quotesDoublequotes) {
|
||||
std::stringstream out;
|
||||
toJSON(out, "\"");
|
||||
|
||||
ASSERT_EQ(out.str(), "\"\\\"\"");
|
||||
}
|
||||
|
||||
TEST(toJSON, substringEscape) {
|
||||
std::stringstream out;
|
||||
const char *s = "foo\t";
|
||||
toJSON(out, s+3, s + strlen(s));
|
||||
|
||||
ASSERT_EQ(out.str(), "\"\\t\"");
|
||||
}
|
||||
|
||||
/* ----------------------------------------------------------------------------
|
||||
* JSONObject
|
||||
* --------------------------------------------------------------------------*/
|
||||
|
||||
TEST(JSONObject, emptyObject) {
|
||||
std::stringstream out;
|
||||
{
|
||||
JSONObject t(out);
|
||||
}
|
||||
ASSERT_EQ(out.str(), "{}");
|
||||
}
|
||||
|
||||
TEST(JSONObject, objectWithList) {
|
||||
std::stringstream out;
|
||||
{
|
||||
JSONObject t(out);
|
||||
auto l = t.list("list");
|
||||
l.elem("element");
|
||||
}
|
||||
ASSERT_EQ(out.str(), R"#({"list":["element"]})#");
|
||||
}
|
||||
|
||||
TEST(JSONObject, objectWithListIndent) {
|
||||
std::stringstream out;
|
||||
{
|
||||
JSONObject t(out, true);
|
||||
auto l = t.list("list");
|
||||
l.elem("element");
|
||||
}
|
||||
ASSERT_EQ(out.str(),
|
||||
R"#({
|
||||
"list": [
|
||||
"element"
|
||||
]
|
||||
})#");
|
||||
}
|
||||
|
||||
TEST(JSONObject, objectWithPlaceholderAndList) {
|
||||
std::stringstream out;
|
||||
{
|
||||
JSONObject t(out);
|
||||
auto l = t.placeholder("list");
|
||||
l.list().elem("element");
|
||||
}
|
||||
|
||||
ASSERT_EQ(out.str(), R"#({"list":["element"]})#");
|
||||
}
|
||||
|
||||
TEST(JSONObject, objectWithPlaceholderAndObject) {
|
||||
std::stringstream out;
|
||||
{
|
||||
JSONObject t(out);
|
||||
auto l = t.placeholder("object");
|
||||
l.object().attr("key", "value");
|
||||
}
|
||||
|
||||
ASSERT_EQ(out.str(), R"#({"object":{"key":"value"}})#");
|
||||
}
|
||||
|
||||
/* ----------------------------------------------------------------------------
|
||||
* JSONList
|
||||
* --------------------------------------------------------------------------*/
|
||||
|
||||
TEST(JSONList, empty) {
|
||||
std::stringstream out;
|
||||
{
|
||||
JSONList l(out);
|
||||
}
|
||||
ASSERT_EQ(out.str(), R"#([])#");
|
||||
}
|
||||
|
||||
TEST(JSONList, withElements) {
|
||||
std::stringstream out;
|
||||
{
|
||||
JSONList l(out);
|
||||
l.elem("one");
|
||||
l.object();
|
||||
l.placeholder().write("three");
|
||||
}
|
||||
ASSERT_EQ(out.str(), R"#(["one",{},"three"])#");
|
||||
}
|
||||
}
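
The JSONObject and JSONList writers above are RAII streams: each emits its opening bracket on construction and its closing bracket on destruction, which is why the tests wrap them in an inner scope before inspecting out.str(). A minimal sketch of nested use, assuming only the json.hh calls these tests exercise (attr, list, elem):

#include "json.hh"
#include <iostream>

int main()
{
    {
        nix::JSONObject obj(std::cout);   // emits '{'
        obj.attr("name", "example");
        auto deps = obj.list("deps");     // emits "deps":[
        deps.elem("libutil");
        deps.elem("libstore");
    }   // deps, then obj, are destroyed here, emitting ']' and '}'
    std::cout << "\n";
    // prints {"name":"example","deps":["libutil","libstore"]}
}

Destruction order within the scope (child list before parent object) is what keeps the brackets balanced.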
@ -8,7 +8,7 @@ libutil-tests_INSTALL_DIR :=

libutil-tests_SOURCES := $(wildcard $(d)/*.cc)

-libutil-tests_CXXFLAGS += -I src/libutil
+libutil-tests_CXXFLAGS += -I src/libutil -I src/libexpr

libutil-tests_LIBS = libutil
255 src/libutil/tests/logging.cc Normal file
@ -0,0 +1,255 @@
#include "logging.hh"
|
||||
#include "nixexpr.hh"
|
||||
#include "util.hh"
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
namespace nix {
|
||||
|
||||
/* ----------------------------------------------------------------------------
|
||||
* logEI
|
||||
* --------------------------------------------------------------------------*/
|
||||
|
||||
TEST(logEI, catpuresBasicProperties) {
|
||||
|
||||
MakeError(TestError, Error);
|
||||
ErrorInfo::programName = std::optional("error-unit-test");
|
||||
|
||||
try {
|
||||
throw TestError("an error for testing purposes");
|
||||
} catch (Error &e) {
|
||||
testing::internal::CaptureStderr();
|
||||
logger->logEI(e.info());
|
||||
auto str = testing::internal::GetCapturedStderr();
|
||||
|
||||
ASSERT_STREQ(str.c_str(),"\x1B[31;1merror:\x1B[0m\x1B[34;1m --- TestError ------------------------------------ error-unit-test\x1B[0m\nan error for testing purposes\n");
|
||||
}
|
||||
}
|
||||
|
||||
TEST(logEI, appendingHintsToPreviousError) {
|
||||
|
||||
MakeError(TestError, Error);
|
||||
ErrorInfo::programName = std::optional("error-unit-test");
|
||||
|
||||
try {
|
||||
auto e = Error("initial error");
|
||||
throw TestError(e.info());
|
||||
} catch (Error &e) {
|
||||
ErrorInfo ei = e.info();
|
||||
ei.hint = hintfmt("%s; subsequent error message.", normaltxt(e.info().hint ? e.info().hint->str() : ""));
|
||||
|
||||
testing::internal::CaptureStderr();
|
||||
logger->logEI(ei);
|
||||
auto str = testing::internal::GetCapturedStderr();
|
||||
|
||||
ASSERT_STREQ(str.c_str(), "\x1B[31;1merror:\x1B[0m\x1B[34;1m --- TestError ------------------------------------ error-unit-test\x1B[0m\n\x1B[33;1m\x1B[0minitial error\x1B[0m; subsequent error message.\n");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
TEST(logEI, picksUpSysErrorExitCode) {
|
||||
|
||||
MakeError(TestError, Error);
|
||||
ErrorInfo::programName = std::optional("error-unit-test");
|
||||
|
||||
try {
|
||||
auto x = readFile(-1);
|
||||
}
|
||||
catch (SysError &e) {
|
||||
testing::internal::CaptureStderr();
|
||||
logError(e.info());
|
||||
auto str = testing::internal::GetCapturedStderr();
|
||||
|
||||
ASSERT_STREQ(str.c_str(), "\x1B[31;1merror:\x1B[0m\x1B[34;1m --- SysError ------------------------------------- error-unit-test\x1B[0m\n\x1B[33;1m\x1B[0mstatting file\x1B[0m: \x1B[33;1mBad file descriptor\x1B[0m\n");
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
TEST(logEI, loggingErrorOnInfoLevel) {
|
||||
testing::internal::CaptureStderr();
|
||||
|
||||
logger->logEI({ .level = lvlInfo,
|
||||
.name = "Info name",
|
||||
.description = "Info description",
|
||||
});
|
||||
|
||||
auto str = testing::internal::GetCapturedStderr();
|
||||
ASSERT_STREQ(str.c_str(), "\x1B[32;1minfo:\x1B[0m\x1B[34;1m --- Info name ------------------------------------- error-unit-test\x1B[0m\nInfo description\n");
|
||||
}
|
||||
|
||||
TEST(logEI, loggingErrorOnTalkativeLevel) {
|
||||
verbosity = lvlTalkative;
|
||||
|
||||
testing::internal::CaptureStderr();
|
||||
|
||||
logger->logEI({ .level = lvlTalkative,
|
||||
.name = "Talkative name",
|
||||
.description = "Talkative description",
|
||||
});
|
||||
|
||||
auto str = testing::internal::GetCapturedStderr();
|
||||
ASSERT_STREQ(str.c_str(), "\x1B[32;1mtalk:\x1B[0m\x1B[34;1m --- Talkative name -------------------------------- error-unit-test\x1B[0m\nTalkative description\n");
|
||||
}
|
||||
|
||||
TEST(logEI, loggingErrorOnChattyLevel) {
|
||||
verbosity = lvlChatty;
|
||||
|
||||
testing::internal::CaptureStderr();
|
||||
|
||||
logger->logEI({ .level = lvlChatty,
|
||||
.name = "Chatty name",
|
||||
.description = "Talkative description",
|
||||
});
|
||||
|
||||
auto str = testing::internal::GetCapturedStderr();
|
||||
ASSERT_STREQ(str.c_str(), "\x1B[32;1mchat:\x1B[0m\x1B[34;1m --- Chatty name ----------------------------------- error-unit-test\x1B[0m\nTalkative description\n");
|
||||
}
|
||||
|
||||
TEST(logEI, loggingErrorOnDebugLevel) {
|
||||
verbosity = lvlDebug;
|
||||
|
||||
testing::internal::CaptureStderr();
|
||||
|
||||
logger->logEI({ .level = lvlDebug,
|
||||
.name = "Debug name",
|
||||
.description = "Debug description",
|
||||
});
|
||||
|
||||
auto str = testing::internal::GetCapturedStderr();
|
||||
ASSERT_STREQ(str.c_str(), "\x1B[33;1mdebug:\x1B[0m\x1B[34;1m --- Debug name ----------------------------------- error-unit-test\x1B[0m\nDebug description\n");
|
||||
}
|
||||
|
||||
TEST(logEI, loggingErrorOnVomitLevel) {
|
||||
verbosity = lvlVomit;
|
||||
|
||||
testing::internal::CaptureStderr();
|
||||
|
||||
logger->logEI({ .level = lvlVomit,
|
||||
.name = "Vomit name",
|
||||
.description = "Vomit description",
|
||||
});
|
||||
|
||||
auto str = testing::internal::GetCapturedStderr();
|
||||
ASSERT_STREQ(str.c_str(), "\x1B[32;1mvomit:\x1B[0m\x1B[34;1m --- Vomit name ----------------------------------- error-unit-test\x1B[0m\nVomit description\n");
|
||||
}
|
||||
|
||||
/* ----------------------------------------------------------------------------
|
||||
* logError
|
||||
* --------------------------------------------------------------------------*/
|
||||
|
||||
|
||||
TEST(logError, logErrorWithoutHintOrCode) {
|
||||
testing::internal::CaptureStderr();
|
||||
|
||||
logError({
|
||||
.name = "name",
|
||||
.description = "error description",
|
||||
});
|
||||
|
||||
auto str = testing::internal::GetCapturedStderr();
|
||||
ASSERT_STREQ(str.c_str(), "\x1B[31;1merror:\x1B[0m\x1B[34;1m --- name ----------------------------------------- error-unit-test\x1B[0m\nerror description\n");
|
||||
}
|
||||
|
||||
TEST(logError, logErrorWithPreviousAndNextLinesOfCode) {
|
||||
SymbolTable testTable;
|
||||
auto problem_file = testTable.create("myfile.nix");
|
||||
|
||||
testing::internal::CaptureStderr();
|
||||
|
||||
logError({
|
||||
.name = "error name",
|
||||
.description = "error with code lines",
|
||||
.hint = hintfmt("this hint has %1% templated %2%!!",
|
||||
"yellow",
|
||||
"values"),
|
||||
.nixCode = NixCode {
|
||||
.errPos = Pos(problem_file, 40, 13),
|
||||
.prevLineOfCode = "previous line of code",
|
||||
.errLineOfCode = "this is the problem line of code",
|
||||
.nextLineOfCode = "next line of code",
|
||||
}});
|
||||
|
||||
|
||||
auto str = testing::internal::GetCapturedStderr();
|
||||
ASSERT_STREQ(str.c_str(), "\x1B[31;1merror:\x1B[0m\x1B[34;1m --- error name ----------------------------------- error-unit-test\x1B[0m\nin file: \x1B[34;1mmyfile.nix (40:13)\x1B[0m\n\nerror with code lines\n\n 39| previous line of code\n 40| this is the problem line of code\n | \x1B[31;1m^\x1B[0m\n 41| next line of code\n\nthis hint has \x1B[33;1myellow\x1B[0m templated \x1B[33;1mvalues\x1B[0m!!\n");
|
||||
}
|
||||
|
||||
TEST(logError, logErrorWithoutLinesOfCode) {
|
||||
SymbolTable testTable;
|
||||
auto problem_file = testTable.create("myfile.nix");
|
||||
testing::internal::CaptureStderr();
|
||||
|
||||
logError({
|
||||
.name = "error name",
|
||||
.description = "error without any code lines.",
|
||||
.hint = hintfmt("this hint has %1% templated %2%!!",
|
||||
"yellow",
|
||||
"values"),
|
||||
.nixCode = NixCode {
|
||||
.errPos = Pos(problem_file, 40, 13)
|
||||
}});
|
||||
|
||||
auto str = testing::internal::GetCapturedStderr();
|
||||
ASSERT_STREQ(str.c_str(), "\x1B[31;1merror:\x1B[0m\x1B[34;1m --- error name ----------------------------------- error-unit-test\x1B[0m\nin file: \x1B[34;1mmyfile.nix (40:13)\x1B[0m\n\nerror without any code lines.\n\nthis hint has \x1B[33;1myellow\x1B[0m templated \x1B[33;1mvalues\x1B[0m!!\n");
|
||||
}
|
||||
|
||||
TEST(logError, logErrorWithOnlyHintAndName) {
|
||||
SymbolTable testTable;
|
||||
auto problem_file = testTable.create("myfile.nix");
|
||||
testing::internal::CaptureStderr();
|
||||
|
||||
logError({
|
||||
.name = "error name",
|
||||
.hint = hintfmt("hint %1%", "only"),
|
||||
.nixCode = NixCode {
|
||||
.errPos = Pos(problem_file, 40, 13)
|
||||
}});
|
||||
|
||||
auto str = testing::internal::GetCapturedStderr();
|
||||
ASSERT_STREQ(str.c_str(), "\x1B[31;1merror:\x1B[0m\x1B[34;1m --- error name ----------------------------------- error-unit-test\x1B[0m\nin file: \x1B[34;1mmyfile.nix (40:13)\x1B[0m\n\nhint \x1B[33;1monly\x1B[0m\n");
|
||||
|
||||
}
|
||||
|
||||
/* ----------------------------------------------------------------------------
|
||||
* logWarning
|
||||
* --------------------------------------------------------------------------*/
|
||||
|
||||
TEST(logWarning, logWarningWithNameDescriptionAndHint) {
|
||||
testing::internal::CaptureStderr();
|
||||
|
||||
logWarning({
|
||||
.name = "name",
|
||||
.description = "error description",
|
||||
.hint = hintfmt("there was a %1%", "warning"),
|
||||
});
|
||||
|
||||
auto str = testing::internal::GetCapturedStderr();
|
||||
ASSERT_STREQ(str.c_str(), "\x1B[33;1mwarning:\x1B[0m\x1B[34;1m --- name --------------------------------------- error-unit-test\x1B[0m\nerror description\n\nthere was a \x1B[33;1mwarning\x1B[0m\n");
|
||||
}
|
||||
|
||||
TEST(logWarning, logWarningWithFileLineNumAndCode) {
|
||||
|
||||
SymbolTable testTable;
|
||||
auto problem_file = testTable.create("myfile.nix");
|
||||
|
||||
testing::internal::CaptureStderr();
|
||||
|
||||
logWarning({
|
||||
.name = "warning name",
|
||||
.description = "warning description",
|
||||
.hint = hintfmt("this hint has %1% templated %2%!!",
|
||||
"yellow",
|
||||
"values"),
|
||||
.nixCode = NixCode {
|
||||
.errPos = Pos(problem_file, 40, 13),
|
||||
.prevLineOfCode = std::nullopt,
|
||||
.errLineOfCode = "this is the problem line of code",
|
||||
.nextLineOfCode = std::nullopt
|
||||
}});
|
||||
|
||||
|
||||
auto str = testing::internal::GetCapturedStderr();
|
||||
ASSERT_STREQ(str.c_str(), "\x1B[33;1mwarning:\x1B[0m\x1B[34;1m --- warning name ------------------------------- error-unit-test\x1B[0m\nin file: \x1B[34;1mmyfile.nix (40:13)\x1B[0m\n\nwarning description\n\n 40| this is the problem line of code\n | \x1B[31;1m^\x1B[0m\n\nthis hint has \x1B[33;1myellow\x1B[0m templated \x1B[33;1mvalues\x1B[0m!!\n");
|
||||
}
|
||||
|
||||
}
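
A condensed sketch of the throw/catch/log round trip these tests pin down, using only calls the tests themselves make (MakeError, logger->logEI, e.info()):

#include "logging.hh"
#include "util.hh"

// Sketch only: define a domain-specific error type, throw it, and
// render it through the global logger, as the tests above do.
void demo()
{
    using namespace nix;
    MakeError(DemoError, Error);   // local class, as in the tests

    try {
        throw DemoError("something went sideways");
    } catch (Error & e) {
        logger->logEI(e.info());   // prints the formatted error block
    }
}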
130 src/libutil/tests/lru-cache.cc Normal file
@ -0,0 +1,130 @@
#include "lru-cache.hh"
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
namespace nix {
|
||||
|
||||
/* ----------------------------------------------------------------------------
|
||||
* size
|
||||
* --------------------------------------------------------------------------*/
|
||||
|
||||
TEST(LRUCache, sizeOfEmptyCacheIsZero) {
|
||||
LRUCache<std::string, std::string> c(10);
|
||||
ASSERT_EQ(c.size(), 0);
|
||||
}
|
||||
|
||||
TEST(LRUCache, sizeOfSingleElementCacheIsOne) {
|
||||
LRUCache<std::string, std::string> c(10);
|
||||
c.upsert("foo", "bar");
|
||||
ASSERT_EQ(c.size(), 1);
|
||||
}
|
||||
|
||||
/* ----------------------------------------------------------------------------
|
||||
* upsert / get
|
||||
* --------------------------------------------------------------------------*/
|
||||
|
||||
TEST(LRUCache, getFromEmptyCache) {
|
||||
LRUCache<std::string, std::string> c(10);
|
||||
auto val = c.get("x");
|
||||
ASSERT_EQ(val.has_value(), false);
|
||||
}
|
||||
|
||||
TEST(LRUCache, getExistingValue) {
|
||||
LRUCache<std::string, std::string> c(10);
|
||||
c.upsert("foo", "bar");
|
||||
auto val = c.get("foo");
|
||||
ASSERT_EQ(val, "bar");
|
||||
}
|
||||
|
||||
TEST(LRUCache, getNonExistingValueFromNonEmptyCache) {
|
||||
LRUCache<std::string, std::string> c(10);
|
||||
c.upsert("foo", "bar");
|
||||
auto val = c.get("another");
|
||||
ASSERT_EQ(val.has_value(), false);
|
||||
}
|
||||
|
||||
TEST(LRUCache, upsertOnZeroCapacityCache) {
|
||||
LRUCache<std::string, std::string> c(0);
|
||||
c.upsert("foo", "bar");
|
||||
auto val = c.get("foo");
|
||||
ASSERT_EQ(val.has_value(), false);
|
||||
}
|
||||
|
||||
TEST(LRUCache, updateExistingValue) {
|
||||
LRUCache<std::string, std::string> c(1);
|
||||
c.upsert("foo", "bar");
|
||||
|
||||
auto val = c.get("foo");
|
||||
ASSERT_EQ(val.value_or("error"), "bar");
|
||||
ASSERT_EQ(c.size(), 1);
|
||||
|
||||
c.upsert("foo", "changed");
|
||||
val = c.get("foo");
|
||||
ASSERT_EQ(val.value_or("error"), "changed");
|
||||
ASSERT_EQ(c.size(), 1);
|
||||
}
|
||||
|
||||
TEST(LRUCache, overwriteOldestWhenCapacityIsReached) {
|
||||
LRUCache<std::string, std::string> c(3);
|
||||
c.upsert("one", "eins");
|
||||
c.upsert("two", "zwei");
|
||||
c.upsert("three", "drei");
|
||||
|
||||
ASSERT_EQ(c.size(), 3);
|
||||
ASSERT_EQ(c.get("one").value_or("error"), "eins");
|
||||
|
||||
// exceed capacity
|
||||
c.upsert("another", "whatever");
|
||||
|
||||
ASSERT_EQ(c.size(), 3);
|
||||
// Retrieving "one" makes it the most recent element thus
|
||||
// two will be the oldest one and thus replaced.
|
||||
ASSERT_EQ(c.get("two").has_value(), false);
|
||||
ASSERT_EQ(c.get("another").value(), "whatever");
|
||||
}
|
||||
|
||||
/* ----------------------------------------------------------------------------
|
||||
* clear
|
||||
* --------------------------------------------------------------------------*/
|
||||
|
||||
TEST(LRUCache, clearEmptyCache) {
|
||||
LRUCache<std::string, std::string> c(10);
|
||||
c.clear();
|
||||
ASSERT_EQ(c.size(), 0);
|
||||
}
|
||||
|
||||
TEST(LRUCache, clearNonEmptyCache) {
|
||||
LRUCache<std::string, std::string> c(10);
|
||||
c.upsert("one", "eins");
|
||||
c.upsert("two", "zwei");
|
||||
c.upsert("three", "drei");
|
||||
ASSERT_EQ(c.size(), 3);
|
||||
c.clear();
|
||||
ASSERT_EQ(c.size(), 0);
|
||||
}
|
||||
|
||||
/* ----------------------------------------------------------------------------
|
||||
* erase
|
||||
* --------------------------------------------------------------------------*/
|
||||
|
||||
TEST(LRUCache, eraseFromEmptyCache) {
|
||||
LRUCache<std::string, std::string> c(10);
|
||||
ASSERT_EQ(c.erase("foo"), false);
|
||||
ASSERT_EQ(c.size(), 0);
|
||||
}
|
||||
|
||||
TEST(LRUCache, eraseMissingFromNonEmptyCache) {
|
||||
LRUCache<std::string, std::string> c(10);
|
||||
c.upsert("one", "eins");
|
||||
ASSERT_EQ(c.erase("foo"), false);
|
||||
ASSERT_EQ(c.size(), 1);
|
||||
ASSERT_EQ(c.get("one").value_or("error"), "eins");
|
||||
}
|
||||
|
||||
TEST(LRUCache, eraseFromNonEmptyCache) {
|
||||
LRUCache<std::string, std::string> c(10);
|
||||
c.upsert("one", "eins");
|
||||
ASSERT_EQ(c.erase("one"), true);
|
||||
ASSERT_EQ(c.size(), 0);
|
||||
ASSERT_EQ(c.get("one").value_or("empty"), "empty");
|
||||
}
|
||||
}
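
The get/upsert pair above returns std::optional, so a cache miss is explicit at the call site; a short caller-side sketch under the same interface:

#include "lru-cache.hh"
#include <iostream>
#include <string>

int main()
{
    nix::LRUCache<std::string, std::string> cache(2);  // capacity 2
    cache.upsert("pi", "3.14159");

    // get() hands back std::optional, so a miss cannot be
    // confused with an empty stored value.
    if (auto v = cache.get("pi"))
        std::cout << "hit: " << *v << "\n";
    else
        std::cout << "miss\n";
}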
@ -5,6 +5,8 @@

namespace nix {

+/* ----------- tests for util.hh ------------------------------------------------*/
+
/* ----------------------------------------------------------------------------
 * absPath
 * --------------------------------------------------------------------------*/

@ -15,6 +17,9 @@ namespace nix {
        ASSERT_EQ(p, "/");
    }

+
+
+
    TEST(absPath, turnsEmptyPathIntoCWD) {
        char cwd[PATH_MAX+1];
        auto p = absPath("");

@ -581,5 +586,4 @@ namespace nix {

        ASSERT_EQ(filterANSIEscapes(s, true), "foo bar baz" );
    }
-
}
266 src/libutil/tests/url.cc Normal file
@ -0,0 +1,266 @@
#include "url.hh"
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
namespace nix {
|
||||
|
||||
/* ----------- tests for url.hh --------------------------------------------------*/
|
||||
|
||||
string print_map(std::map<string, string> m) {
|
||||
std::map<string, string>::iterator it;
|
||||
string s = "{ ";
|
||||
for (it = m.begin(); it != m.end(); ++it) {
|
||||
s += "{ ";
|
||||
s += it->first;
|
||||
s += " = ";
|
||||
s += it->second;
|
||||
s += " } ";
|
||||
}
|
||||
s += "}";
|
||||
return s;
|
||||
}
|
||||
|
||||
|
||||
std::ostream& operator<<(std::ostream& os, const ParsedURL& p) {
|
||||
return os << "\n"
|
||||
<< "url: " << p.url << "\n"
|
||||
<< "base: " << p.base << "\n"
|
||||
<< "scheme: " << p.scheme << "\n"
|
||||
<< "authority: " << p.authority.value() << "\n"
|
||||
<< "path: " << p.path << "\n"
|
||||
<< "query: " << print_map(p.query) << "\n"
|
||||
<< "fragment: " << p.fragment << "\n";
|
||||
}
|
||||
|
||||
TEST(parseURL, parsesSimpleHttpUrl) {
|
||||
auto s = "http://www.example.org/file.tar.gz";
|
||||
auto parsed = parseURL(s);
|
||||
|
||||
ParsedURL expected {
|
||||
.url = "http://www.example.org/file.tar.gz",
|
||||
.base = "http://www.example.org/file.tar.gz",
|
||||
.scheme = "http",
|
||||
.authority = "www.example.org",
|
||||
.path = "/file.tar.gz",
|
||||
.query = (StringMap) { },
|
||||
.fragment = "",
|
||||
};
|
||||
|
||||
ASSERT_EQ(parsed, expected);
|
||||
}
|
||||
|
||||
TEST(parseURL, parsesSimpleHttpsUrl) {
|
||||
auto s = "https://www.example.org/file.tar.gz";
|
||||
auto parsed = parseURL(s);
|
||||
|
||||
ParsedURL expected {
|
||||
.url = "https://www.example.org/file.tar.gz",
|
||||
.base = "https://www.example.org/file.tar.gz",
|
||||
.scheme = "https",
|
||||
.authority = "www.example.org",
|
||||
.path = "/file.tar.gz",
|
||||
.query = (StringMap) { },
|
||||
.fragment = "",
|
||||
};
|
||||
|
||||
ASSERT_EQ(parsed, expected);
|
||||
}
|
||||
|
||||
TEST(parseURL, parsesSimpleHttpUrlWithQueryAndFragment) {
|
||||
auto s = "https://www.example.org/file.tar.gz?download=fast&when=now#hello";
|
||||
auto parsed = parseURL(s);
|
||||
|
||||
ParsedURL expected {
|
||||
.url = "https://www.example.org/file.tar.gz",
|
||||
.base = "https://www.example.org/file.tar.gz",
|
||||
.scheme = "https",
|
||||
.authority = "www.example.org",
|
||||
.path = "/file.tar.gz",
|
||||
.query = (StringMap) { { "download", "fast" }, { "when", "now" } },
|
||||
.fragment = "hello",
|
||||
};
|
||||
|
||||
ASSERT_EQ(parsed, expected);
|
||||
}
|
||||
|
||||
TEST(parseURL, parsesSimpleHttpUrlWithComplexFragment) {
|
||||
auto s = "http://www.example.org/file.tar.gz?field=value#?foo=bar%23";
|
||||
auto parsed = parseURL(s);
|
||||
|
||||
ParsedURL expected {
|
||||
.url = "http://www.example.org/file.tar.gz",
|
||||
.base = "http://www.example.org/file.tar.gz",
|
||||
.scheme = "http",
|
||||
.authority = "www.example.org",
|
||||
.path = "/file.tar.gz",
|
||||
.query = (StringMap) { { "field", "value" } },
|
||||
.fragment = "?foo=bar#",
|
||||
};
|
||||
|
||||
ASSERT_EQ(parsed, expected);
|
||||
}
|
||||
|
||||
|
||||
TEST(parseURL, parseIPv4Address) {
|
||||
auto s = "http://127.0.0.1:8080/file.tar.gz?download=fast&when=now#hello";
|
||||
auto parsed = parseURL(s);
|
||||
|
||||
ParsedURL expected {
|
||||
.url = "http://127.0.0.1:8080/file.tar.gz",
|
||||
.base = "https://127.0.0.1:8080/file.tar.gz",
|
||||
.scheme = "http",
|
||||
.authority = "127.0.0.1:8080",
|
||||
.path = "/file.tar.gz",
|
||||
.query = (StringMap) { { "download", "fast" }, { "when", "now" } },
|
||||
.fragment = "hello",
|
||||
};
|
||||
|
||||
ASSERT_EQ(parsed, expected);
|
||||
}
|
||||
|
||||
TEST(parseURL, parseIPv6Address) {
|
||||
auto s = "http://[2a02:8071:8192:c100:311d:192d:81ac:11ea]:8080";
|
||||
auto parsed = parseURL(s);
|
||||
|
||||
ParsedURL expected {
|
||||
.url = "http://[2a02:8071:8192:c100:311d:192d:81ac:11ea]:8080",
|
||||
.base = "http://[2a02:8071:8192:c100:311d:192d:81ac:11ea]:8080",
|
||||
.scheme = "http",
|
||||
.authority = "[2a02:8071:8192:c100:311d:192d:81ac:11ea]:8080",
|
||||
.path = "",
|
||||
.query = (StringMap) { },
|
||||
.fragment = "",
|
||||
};
|
||||
|
||||
ASSERT_EQ(parsed, expected);
|
||||
|
||||
}
|
||||
|
||||
TEST(parseURL, parseEmptyQueryParams) {
|
||||
auto s = "http://127.0.0.1:8080/file.tar.gz?&&&&&";
|
||||
auto parsed = parseURL(s);
|
||||
ASSERT_EQ(parsed.query, (StringMap) { });
|
||||
}
|
||||
|
||||
TEST(parseURL, parseUserPassword) {
|
||||
auto s = "http://user:pass@www.example.org:8080/file.tar.gz";
|
||||
auto parsed = parseURL(s);
|
||||
|
||||
ParsedURL expected {
|
||||
.url = "http://user:pass@www.example.org/file.tar.gz",
|
||||
.base = "http://user:pass@www.example.org/file.tar.gz",
|
||||
.scheme = "http",
|
||||
.authority = "user:pass@www.example.org:8080",
|
||||
.path = "/file.tar.gz",
|
||||
.query = (StringMap) { },
|
||||
.fragment = "",
|
||||
};
|
||||
|
||||
|
||||
ASSERT_EQ(parsed, expected);
|
||||
}
|
||||
|
||||
TEST(parseURL, parseFileURLWithQueryAndFragment) {
|
||||
auto s = "file:///none/of/your/business";
|
||||
auto parsed = parseURL(s);
|
||||
|
||||
ParsedURL expected {
|
||||
.url = "",
|
||||
.base = "",
|
||||
.scheme = "file",
|
||||
.authority = "",
|
||||
.path = "/none/of/your/business",
|
||||
.query = (StringMap) { },
|
||||
.fragment = "",
|
||||
};
|
||||
|
||||
ASSERT_EQ(parsed, expected);
|
||||
|
||||
}
|
||||
|
||||
TEST(parseURL, parsedUrlsIsEqualToItself) {
|
||||
auto s = "http://www.example.org/file.tar.gz";
|
||||
auto url = parseURL(s);
|
||||
|
||||
ASSERT_TRUE(url == url);
|
||||
}
|
||||
|
||||
TEST(parseURL, parseFTPUrl) {
|
||||
auto s = "ftp://ftp.nixos.org/downloads/nixos.iso";
|
||||
auto parsed = parseURL(s);
|
||||
|
||||
ParsedURL expected {
|
||||
.url = "ftp://ftp.nixos.org/downloads/nixos.iso",
|
||||
.base = "ftp://ftp.nixos.org/downloads/nixos.iso",
|
||||
.scheme = "ftp",
|
||||
.authority = "ftp.nixos.org",
|
||||
.path = "/downloads/nixos.iso",
|
||||
.query = (StringMap) { },
|
||||
.fragment = "",
|
||||
};
|
||||
|
||||
ASSERT_EQ(parsed, expected);
|
||||
}
|
||||
|
||||
TEST(parseURL, parsesAnythingInUriFormat) {
|
||||
auto s = "whatever://github.com/NixOS/nixpkgs.git";
|
||||
auto parsed = parseURL(s);
|
||||
}
|
||||
|
||||
TEST(parseURL, parsesAnythingInUriFormatWithoutDoubleSlash) {
|
||||
auto s = "whatever:github.com/NixOS/nixpkgs.git";
|
||||
auto parsed = parseURL(s);
|
||||
}
|
||||
|
||||
TEST(parseURL, emptyStringIsInvalidURL) {
|
||||
ASSERT_THROW(parseURL(""), Error);
|
||||
}
|
||||
|
||||
/* ----------------------------------------------------------------------------
|
||||
* decodeQuery
|
||||
* --------------------------------------------------------------------------*/
|
||||
|
||||
TEST(decodeQuery, emptyStringYieldsEmptyMap) {
|
||||
auto d = decodeQuery("");
|
||||
ASSERT_EQ(d, (StringMap) { });
|
||||
}
|
||||
|
||||
TEST(decodeQuery, simpleDecode) {
|
||||
auto d = decodeQuery("yi=one&er=two");
|
||||
ASSERT_EQ(d, ((StringMap) { { "yi", "one" }, { "er", "two" } }));
|
||||
}
|
||||
|
||||
TEST(decodeQuery, decodeUrlEncodedArgs) {
|
||||
auto d = decodeQuery("arg=%3D%3D%40%3D%3D");
|
||||
ASSERT_EQ(d, ((StringMap) { { "arg", "==@==" } }));
|
||||
}
|
||||
|
||||
TEST(decodeQuery, decodeArgWithEmptyValue) {
|
||||
auto d = decodeQuery("arg=");
|
||||
ASSERT_EQ(d, ((StringMap) { { "arg", ""} }));
|
||||
}
|
||||
|
||||
/* ----------------------------------------------------------------------------
|
||||
* percentDecode
|
||||
* --------------------------------------------------------------------------*/
|
||||
|
||||
TEST(percentDecode, decodesUrlEncodedString) {
|
||||
string s = "==@==";
|
||||
string d = percentDecode("%3D%3D%40%3D%3D");
|
||||
ASSERT_EQ(d, s);
|
||||
}
|
||||
|
||||
TEST(percentDecode, multipleDecodesAreIdempotent) {
|
||||
string once = percentDecode("%3D%3D%40%3D%3D");
|
||||
string twice = percentDecode(once);
|
||||
|
||||
ASSERT_EQ(once, twice);
|
||||
}
|
||||
|
||||
TEST(percentDecode, trailingPercent) {
|
||||
string s = "==@==%";
|
||||
string d = percentDecode("%3D%3D%40%3D%3D%25");
|
||||
|
||||
ASSERT_EQ(d, s);
|
||||
}
|
||||
|
||||
}
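
A small end-to-end sketch combining the two entry points tested above, parseURL and decodeQuery (the URL itself is made up for illustration):

#include "url.hh"
#include <iostream>

int main()
{
    auto url = nix::parseURL("https://example.org/pkg.tar.gz?rev=abc123#readme");
    std::cout << url.scheme << "\n";           // https
    std::cout << url.path << "\n";             // /pkg.tar.gz
    std::cout << url.query.at("rev") << "\n";  // abc123
    std::cout << url.fragment << "\n";         // readme

    auto q = nix::decodeQuery("a=1&b=2");      // {"a":"1","b":"2"}
    std::cout << q.at("b") << "\n";            // 2
}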
105 src/libutil/tests/xml-writer.cc Normal file
@ -0,0 +1,105 @@
#include "xml-writer.hh"
|
||||
#include <gtest/gtest.h>
|
||||
#include <sstream>
|
||||
|
||||
namespace nix {
|
||||
|
||||
/* ----------------------------------------------------------------------------
|
||||
* XMLWriter
|
||||
* --------------------------------------------------------------------------*/
|
||||
|
||||
TEST(XMLWriter, emptyObject) {
|
||||
std::stringstream out;
|
||||
{
|
||||
XMLWriter t(false, out);
|
||||
}
|
||||
|
||||
ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n");
|
||||
}
|
||||
|
||||
TEST(XMLWriter, objectWithEmptyElement) {
|
||||
std::stringstream out;
|
||||
{
|
||||
XMLWriter t(false, out);
|
||||
t.openElement("foobar");
|
||||
}
|
||||
|
||||
ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar></foobar>");
|
||||
}
|
||||
|
||||
TEST(XMLWriter, objectWithElementWithAttrs) {
|
||||
std::stringstream out;
|
||||
{
|
||||
XMLWriter t(false, out);
|
||||
XMLAttrs attrs = {
|
||||
{ "foo", "bar" }
|
||||
};
|
||||
t.openElement("foobar", attrs);
|
||||
}
|
||||
|
||||
ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar foo=\"bar\"></foobar>");
|
||||
}
|
||||
|
||||
TEST(XMLWriter, objectWithElementWithEmptyAttrs) {
|
||||
std::stringstream out;
|
||||
{
|
||||
XMLWriter t(false, out);
|
||||
XMLAttrs attrs = {};
|
||||
t.openElement("foobar", attrs);
|
||||
}
|
||||
|
||||
ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar></foobar>");
|
||||
}
|
||||
|
||||
TEST(XMLWriter, objectWithElementWithAttrsEscaping) {
|
||||
std::stringstream out;
|
||||
{
|
||||
XMLWriter t(false, out);
|
||||
XMLAttrs attrs = {
|
||||
{ "<key>", "<value>" }
|
||||
};
|
||||
t.openElement("foobar", attrs);
|
||||
}
|
||||
|
||||
// XXX: While "<value>" is escaped, "<key>" isn't which I think is a bug.
|
||||
ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar <key>=\"<value>\"></foobar>");
|
||||
}
|
||||
|
||||
TEST(XMLWriter, objectWithElementWithAttrsIndented) {
|
||||
std::stringstream out;
|
||||
{
|
||||
XMLWriter t(true, out);
|
||||
XMLAttrs attrs = {
|
||||
{ "foo", "bar" }
|
||||
};
|
||||
t.openElement("foobar", attrs);
|
||||
}
|
||||
|
||||
ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar foo=\"bar\">\n</foobar>\n");
|
||||
}
|
||||
|
||||
TEST(XMLWriter, writeEmptyElement) {
|
||||
std::stringstream out;
|
||||
{
|
||||
XMLWriter t(false, out);
|
||||
t.writeEmptyElement("foobar");
|
||||
}
|
||||
|
||||
ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar />");
|
||||
}
|
||||
|
||||
TEST(XMLWriter, writeEmptyElementWithAttributes) {
|
||||
std::stringstream out;
|
||||
{
|
||||
XMLWriter t(false, out);
|
||||
XMLAttrs attrs = {
|
||||
{ "foo", "bar" }
|
||||
};
|
||||
t.writeEmptyElement("foobar", attrs);
|
||||
|
||||
}
|
||||
|
||||
ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar foo=\"bar\" />");
|
||||
}
|
||||
|
||||
}
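
A usage sketch for the writer, assuming only the operations the tests above touch (construction with an indent flag, openElement with attributes, writeEmptyElement, and close-on-destruction):

#include "xml-writer.hh"
#include <iostream>

int main()
{
    nix::XMLAttrs attrs = { { "name", "demo" } };
    {
        nix::XMLWriter xml(true, std::cout);  // true = indent output
        xml.openElement("package", attrs);
        xml.writeEmptyElement("dependency");
    }   // destructor closes the still-open <package> element
}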
@ -4,6 +4,7 @@
namespace nix {

std::regex refRegex(refRegexS, std::regex::ECMAScript);
+std::regex badGitRefRegex(badGitRefRegexS, std::regex::ECMAScript);
std::regex revRegex(revRegexS, std::regex::ECMAScript);
std::regex flakeIdRegex(flakeIdRegexS, std::regex::ECMAScript);
@ -49,6 +49,12 @@ const static std::string pathRegex = "(?:" + segmentRegex + "(?:/" + segmentRege
const static std::string refRegexS = "[a-zA-Z0-9][a-zA-Z0-9_.-]*"; // FIXME: check
extern std::regex refRegex;

+// Instead of defining what a good Git Ref is, we define what a bad Git Ref is
+// This is because of the definition of a ref in refs.c in https://github.com/git/git
+// See tests/fetchGitRefs.sh for the full definition
+const static std::string badGitRefRegexS = "//|^[./]|/\\.|\\.\\.|[[:cntrl:][:space:]:?^~\[]|\\\\|\\*|\\.lock$|\\.lock/|@\\{|[/.]$|^@$|^$";
+extern std::regex badGitRefRegex;
+
// A Git revision (a SHA-1 commit hash).
const static std::string revRegexS = "[0-9a-fA-F]{40}";
extern std::regex revRegex;
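
Because a valid name is specified indirectly here (anything refRegex-shaped that contains no match of the reject pattern), a caller has to combine both regexes; a hedged sketch of that check (the helper name is made up, the regex names come from the header above):

#include <regex>
#include <string>

// Sketch: a ref name passes when it fits the coarse accept pattern
// and triggers none of the reject pattern's bad shapes.
bool looksLikeValidGitRef(const std::string & ref,
    const std::regex & refRegex, const std::regex & badGitRefRegex)
{
    return std::regex_match(ref, refRegex)
        && !std::regex_search(ref, badGitRefRegex);
}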
@ -123,8 +123,7 @@ static void getAllExprs(EvalState & state,
    if (hasSuffix(attrName, ".nix"))
        attrName = string(attrName, 0, attrName.size() - 4);
    if (!attrs.insert(attrName).second) {
-        logError(
-            ErrorInfo {
+        logError({
            .name = "Name collision",
            .hint = hintfmt("warning: name collision in input Nix expressions, skipping '%1%'", path2)
        });
@ -875,8 +874,7 @@ static void queryJSON(Globals & globals, vector<DrvInfo> & elems)
        auto placeholder = metaObj.placeholder(j);
        Value * v = i.queryMeta(j);
        if (!v) {
-            logError(
-                ErrorInfo {
+            logError({
                .name = "Invalid meta attribute",
                .hint = hintfmt("derivation '%s' has invalid meta attribute '%s'",
                    i.queryName(), j)
@ -1131,8 +1129,7 @@ static void opQuery(Globals & globals, Strings opFlags, Strings opArgs)
            attrs2["name"] = j;
            Value * v = i.queryMeta(j);
            if (!v)
-                logError(
-                    ErrorInfo {
+                logError({
                    .name = "Invalid meta attribute",
                    .hint = hintfmt(
                        "derivation '%s' has invalid meta attribute '%s'",
@ -159,7 +159,8 @@ static int _main(int argc, char * * argv)
    std::optional<StorePath> storePath;
    if (args.size() == 2) {
        expectedHash = Hash(args[1], ht);
-        storePath = store->makeFixedOutputPath(unpack, expectedHash, name);
+        const auto recursive = unpack ? FileIngestionMethod::Recursive : FileIngestionMethod::Flat;
+        storePath = store->makeFixedOutputPath(recursive, expectedHash, name);
        if (store->isValidPath(*storePath))
            hash = expectedHash;
        else
@ -208,13 +209,15 @@ static int _main(int argc, char * * argv)
        if (expectedHash != Hash(ht) && expectedHash != hash)
            throw Error("hash mismatch for '%1%'", uri);

+        const auto recursive = unpack ? FileIngestionMethod::Recursive : FileIngestionMethod::Flat;
+
        /* Copy the file to the Nix store. FIXME: if RemoteStore
           implemented addToStoreFromDump() and downloadFile()
           supported a sink, we could stream the download directly
           into the Nix store. */
-        storePath = store->addToStore(name, tmpFile, unpack, ht);
+        storePath = store->addToStore(name, tmpFile, recursive, ht);

-        assert(*storePath == store->makeFixedOutputPath(unpack, hash, name));
+        assert(*storePath == store->makeFixedOutputPath(recursive, hash, name));
    }

    stopProgressBar();
@ -174,10 +174,10 @@ static void opAdd(Strings opFlags, Strings opArgs)
   store. */
static void opAddFixed(Strings opFlags, Strings opArgs)
{
-    bool recursive = false;
+    auto recursive = FileIngestionMethod::Flat;

    for (auto & i : opFlags)
-        if (i == "--recursive") recursive = true;
+        if (i == "--recursive") recursive = FileIngestionMethod::Recursive;
        else throw UsageError("unknown flag '%1%'", i);

    if (opArgs.empty())
@ -194,10 +194,10 @@ static void opAddFixed(Strings opFlags, Strings opArgs)
/* Hack to support caching in `nix-prefetch-url'. */
static void opPrintFixedPath(Strings opFlags, Strings opArgs)
{
-    bool recursive = false;
+    auto recursive = FileIngestionMethod::Flat;

    for (auto i : opFlags)
-        if (i == "--recursive") recursive = true;
+        if (i == "--recursive") recursive = FileIngestionMethod::Recursive;
        else throw UsageError("unknown flag '%1%'", i);

    if (opArgs.size() != 3)
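
The last two hunks repeat this commit's central refactor: the bare bool recursive becomes a two-valued enum, so call sites read as Flat or Recursive instead of an unexplained true/false. A restatement of the idea (the enum body below is a sketch; the hunks above only attest the two enumerator names):

#include <cstdint>

// Sketch of the enum these hunks switch to; only the names
// FileIngestionMethod::Flat and FileIngestionMethod::Recursive
// are taken from the diff itself.
enum struct FileIngestionMethod : uint8_t {
    Flat,       // hash the file's bytes as-is
    Recursive,  // hash the NAR serialisation of the whole path
};

// Before: makeFixedOutputPath(true, hash, name)   -- what does true mean?
// After:  makeFixedOutputPath(FileIngestionMethod::Recursive, hash, name)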
@ -704,7 +704,7 @@ static void opVerify(Strings opFlags, Strings opArgs)
    else throw UsageError("unknown flag '%1%'", i);

    if (store->verifyStore(checkContents, repair)) {
-        logWarning(ErrorInfo {
+        logWarning({
            .name = "Store consistency",
            .description = "not all errors were fixed"
        });
@ -729,8 +729,7 @@ static void opVerifyPath(Strings opFlags, Strings opArgs)
        store->narFromPath(path, sink);
        auto current = sink.finish();
        if (current.first != info->narHash) {
-            logError(
-                ErrorInfo {
+            logError({
                .name = "Hash mismatch",
                .hint = hintfmt(
                    "path '%s' was modified! expected hash '%s', got '%s'",
@ -45,13 +45,15 @@ struct CmdAddToStore : MixDryRun, StoreCommand

        auto narHash = hashString(htSHA256, *sink.s);

-        ValidPathInfo info(store->makeFixedOutputPath(true, narHash, *namePart));
+        ValidPathInfo info(store->makeFixedOutputPath(FileIngestionMethod::Recursive, narHash, *namePart));
        info.narHash = narHash;
        info.narSize = sink.s->size();
-        info.ca = makeFixedOutputCA(true, info.narHash);
+        info.ca = makeFixedOutputCA(FileIngestionMethod::Recursive, info.narHash);

-        if (!dryRun)
-            store->addToStore(info, sink.s);
+        if (!dryRun) {
+            auto source = StringSource { *sink.s };
+            store->addToStore(info, source);
+        }

        logger->stdout("%s", store->printStorePath(info.path));
    }
@ -9,15 +9,14 @@ using namespace nix;

struct CmdHash : Command
{
-    enum Mode { mFile, mPath };
-    Mode mode;
+    FileIngestionMethod mode;
    Base base = SRI;
    bool truncate = false;
    HashType ht = htSHA256;
    std::vector<std::string> paths;
    std::optional<std::string> modulus;

-    CmdHash(Mode mode) : mode(mode)
+    CmdHash(FileIngestionMethod mode) : mode(mode)
    {
        mkFlag(0, "sri", "print hash in SRI format", &base, SRI);
        mkFlag(0, "base64", "print hash in base-64", &base, Base64);
@ -36,9 +35,14 @@ struct CmdHash : Command

    std::string description() override
    {
-        return mode == mFile
-            ? "print cryptographic hash of a regular file"
-            : "print cryptographic hash of the NAR serialisation of a path";
+        const char* d;
+        switch (mode) {
+        case FileIngestionMethod::Flat:
+            d = "print cryptographic hash of a regular file";
+            break;
+        case FileIngestionMethod::Recursive:
+            d = "print cryptographic hash of the NAR serialisation of a path";
+            break;
+        }
+        return d;
    }

    Category category() override { return catUtility; }
@ -53,10 +57,14 @@ struct CmdHash : Command
            else
                hashSink = std::make_unique<HashSink>(ht);

-            if (mode == mFile)
+            switch (mode) {
+            case FileIngestionMethod::Flat:
                readFile(path, *hashSink);
-            else
+                break;
+            case FileIngestionMethod::Recursive:
                dumpPath(path, *hashSink);
+                break;
+            }

            Hash h = hashSink->finish().first;
            if (truncate && h.hashSize > 20) h = compressHash(h, 20);
@ -65,8 +73,8 @@ struct CmdHash : Command
    }
};

-static RegisterCommand r1("hash-file", [](){ return make_ref<CmdHash>(CmdHash::mFile); });
-static RegisterCommand r2("hash-path", [](){ return make_ref<CmdHash>(CmdHash::mPath); });
+static RegisterCommand r1("hash-file", [](){ return make_ref<CmdHash>(FileIngestionMethod::Flat); });
+static RegisterCommand r2("hash-path", [](){ return make_ref<CmdHash>(FileIngestionMethod::Recursive); });

struct CmdToBase : Command
{
@ -137,7 +145,7 @@ static int compatNixHash(int argc, char * * argv)
    });

    if (op == opHash) {
-        CmdHash cmd(flat ? CmdHash::mFile : CmdHash::mPath);
+        CmdHash cmd(flat ? FileIngestionMethod::Flat : FileIngestionMethod::Recursive);
        cmd.ht = ht;
        cmd.base = base32 ? Base32 : Base16;
        cmd.truncate = truncate;
@ -77,12 +77,12 @@ struct CmdMakeContentAddressable : StorePathsCommand, MixJSON

        auto narHash = hashModuloSink.finish().first;

-        ValidPathInfo info(store->makeFixedOutputPath(true, narHash, path.name(), references, hasSelfReference));
+        ValidPathInfo info(store->makeFixedOutputPath(FileIngestionMethod::Recursive, narHash, path.name(), references, hasSelfReference));
        info.references = std::move(references);
        if (hasSelfReference) info.references.insert(info.path.clone());
        info.narHash = narHash;
        info.narSize = sink.s->size();
-        info.ca = makeFixedOutputCA(true, info.narHash);
+        info.ca = makeFixedOutputCA(FileIngestionMethod::Recursive, info.narHash);

        if (!json)
            printInfo("rewrote '%s' to '%s'", pathS, store->printStorePath(info.path));
@ -99,8 +99,7 @@ struct CmdVerify : StorePathsCommand
                if (hash.first != info->narHash) {
                    corrupted++;
                    act2.result(resCorruptedPath, store->printStorePath(info->path));
-                    logError(
-                        ErrorInfo {
+                    logError({
                        .name = "Hash error - path modified",
                        .hint = hintfmt(
                            "path '%s' was modified! expected hash '%s', got '%s'",
@ -156,8 +155,7 @@ struct CmdVerify : StorePathsCommand
                if (!good) {
                    untrusted++;
                    act2.result(resUntrustedPath, store->printStorePath(info->path));
-                    logError(
-                        ErrorInfo {
+                    logError({
                        .name = "Untrusted path",
                        .hint = hintfmt("path '%s' is untrusted",
                            store->printStorePath(info->path))
@ -39,8 +39,7 @@ std::set<std::string> runResolver(const Path & filename)
        throw SysError("statting '%s'", filename);

    if (!S_ISREG(st.st_mode)) {
-        logError(
-            ErrorInfo {
+        logError({
            .name = "Regular MACH file",
            .hint = hintfmt("file '%s' is not a regular file", filename)
        });
@ -48,8 +47,7 @@ std::set<std::string> runResolver(const Path & filename)
    }

    if (st.st_size < sizeof(mach_header_64)) {
-        logError(
-            ErrorInfo {
+        logError({
            .name = "File too short",
            .hint = hintfmt("file '%s' is too short for a MACH binary", filename)
        });
@ -74,8 +72,7 @@ std::set<std::string> runResolver(const Path & filename)
        }
    }
    if (mach64_offset == 0) {
-        logError(
-            ErrorInfo {
+        logError({
            .name = "No mach64 blobs",
            .hint = hintfmt("Could not find any mach64 blobs in file '%1%', continuing...", filename)
        });
@ -84,8 +81,7 @@ std::set<std::string> runResolver(const Path & filename)
    } else if (magic == MH_MAGIC_64 || magic == MH_CIGAM_64) {
        mach64_offset = 0;
    } else {
-        logError(
-            ErrorInfo {
+        logError({
            .name = "Magic number",
            .hint = hintfmt("Object file has unknown magic number '%1%', skipping it...", magic)
        });
111 tests/fetchGitRefs.sh Normal file
@ -0,0 +1,111 @@
source common.sh

if [[ -z $(type -p git) ]]; then
    echo "Git not installed; skipping Git tests"
    exit 99
fi

clearStore

repo="$TEST_ROOT/git"

rm -rf "$repo" "${repo}-tmp" "$TEST_HOME/.cache/nix"

git init "$repo"
git -C "$repo" config user.email "foobar@example.com"
git -C "$repo" config user.name "Foobar"

echo utrecht > "$repo"/hello
git -C "$repo" add hello
git -C "$repo" commit -m 'Bla1'

path=$(nix eval --raw "(builtins.fetchGit { url = $repo; ref = \"master\"; }).outPath")

# Test various combinations of ref names
# (taken from the git project)

# git help check-ref-format
# Git imposes the following rules on how references are named:
#
# 1. They can include slash / for hierarchical (directory) grouping, but no slash-separated component can begin with a dot . or end with the sequence .lock.
# 2. They must contain at least one /. This enforces the presence of a category like heads/, tags/ etc. but the actual names are not restricted. If the --allow-onelevel option is used, this rule is waived.
# 3. They cannot have two consecutive dots .. anywhere.
# 4. They cannot have ASCII control characters (i.e. bytes whose values are lower than \040, or \177 DEL), space, tilde ~, caret ^, or colon : anywhere.
# 5. They cannot have question-mark ?, asterisk *, or open bracket [ anywhere. See the --refspec-pattern option below for an exception to this rule.
# 6. They cannot begin or end with a slash / or contain multiple consecutive slashes (see the --normalize option below for an exception to this rule)
# 7. They cannot end with a dot ..
# 8. They cannot contain a sequence @{.
# 9. They cannot be the single character @.
# 10. They cannot contain a \.

valid_ref() {
    { set +x; printf >&2 '\n>>>>>>>>>> valid_ref %s\b <<<<<<<<<<\n' $(printf %s "$1" | sed -n -e l); set -x; }
    git check-ref-format --branch "$1" >/dev/null
    git -C "$repo" branch "$1" master >/dev/null
    path1=$(nix eval --raw "(builtins.fetchGit { url = $repo; ref = ''$1''; }).outPath")
    [[ $path1 = $path ]]
    git -C "$repo" branch -D "$1" >/dev/null
}

invalid_ref() {
    { set +x; printf >&2 '\n>>>>>>>>>> invalid_ref %s\b <<<<<<<<<<\n' $(printf %s "$1" | sed -n -e l); set -x; }
    # special case for a sole @:
    # --branch @ will try to interpret @ as a branch reference and not fail. Thus we need --allow-onelevel
    if [ "$1" = "@" ]; then
        (! git check-ref-format --allow-onelevel "$1" >/dev/null 2>&1)
    else
        (! git check-ref-format --branch "$1" >/dev/null 2>&1)
    fi
    nix --debug eval --raw "(builtins.fetchGit { url = $repo; ref = ''$1''; }).outPath" 2>&1 | grep 'invalid Git branch/tag name' >/dev/null
}


valid_ref 'foox'
valid_ref '1337'
valid_ref 'foo.baz'
valid_ref 'foo/bar/baz'
valid_ref 'foo./bar'
valid_ref 'heads/foo@bar'
valid_ref "$(printf 'heads/fu\303\237')"
valid_ref 'foo-bar-baz'
valid_ref '$1'
valid_ref 'foo.locke'

invalid_ref 'refs///heads/foo'
invalid_ref 'heads/foo/'
invalid_ref '///heads/foo'
invalid_ref '.foo'
invalid_ref './foo'
invalid_ref './foo/bar'
invalid_ref 'foo/./bar'
invalid_ref 'foo/bar/.'
invalid_ref 'foo bar'
invalid_ref 'foo?bar'
invalid_ref 'foo^bar'
invalid_ref 'foo~bar'
invalid_ref 'foo:bar'
invalid_ref 'foo[bar'
invalid_ref 'foo/bar/.'
invalid_ref '.refs/foo'
invalid_ref 'refs/heads/foo.'
invalid_ref 'heads/foo..bar'
invalid_ref 'heads/foo?bar'
invalid_ref 'heads/foo.lock'
invalid_ref 'heads///foo.lock'
invalid_ref 'foo.lock/bar'
invalid_ref 'foo.lock///bar'
invalid_ref 'heads/v@{ation'
invalid_ref 'heads/foo\.ar' # should fail due to \
invalid_ref 'heads/foo\bar' # should fail due to \
invalid_ref "$(printf 'heads/foo\t')" # should fail because it has a TAB
invalid_ref "$(printf 'heads/foo\177')"
invalid_ref '@'

invalid_ref 'foo/*'
invalid_ref '*/foo'
invalid_ref 'foo/*/bar'
invalid_ref '*'
invalid_ref 'foo/*/*'
invalid_ref '*/foo/*'
invalid_ref '/foo'
invalid_ref ''
@ -18,6 +18,7 @@ nix_tests = \
  nar-access.sh \
  structured-attrs.sh \
  fetchGit.sh \
+  fetchGitRefs.sh \
  fetchGitSubmodules.sh \
  fetchMercurial.sh \
  signing.sh \