Merge branch 'master' of github.com:NixOS/nix into trustless-remote-builder-simple

John Ericson 2020-10-12 23:00:18 +00:00
commit bdc7720227
161 changed files with 2017 additions and 1251 deletions

View file

@ -12,8 +12,6 @@ jobs:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: cachix/install-nix-action@v10
with:
skip_adding_nixpkgs_channel: true
- uses: cachix/install-nix-action@v11
#- run: nix flake check
- run: nix-build -A checks.$(if [[ `uname` = Linux ]]; then echo x86_64-linux; else echo x86_64-darwin; fi)

1
.gitignore vendored
View file

@ -5,7 +5,6 @@ perl/Makefile.config
/aclocal.m4
/autom4te.cache
/precompiled-headers.h.gch
/precompiled-headers.h.pch
/config.*
/configure
/stamp-h1

View file

@ -26,7 +26,7 @@ OPTIMIZE = 1
ifeq ($(OPTIMIZE), 1)
GLOBAL_CXXFLAGS += -O3
else
GLOBAL_CXXFLAGS += -O0
GLOBAL_CXXFLAGS += -O0 -U_FORTIFY_SOURCE
endif
include mk/lib.mk

View file

@ -9,7 +9,7 @@ for more details.
## Installation
On Linux and macOS the easiest way to Install Nix is to run the following shell command
On Linux and macOS the easiest way to install Nix is to run the following shell command
(as a user other than root):
```console

View file

@ -52,5 +52,4 @@ in
command:
"Title: nix\n\n"
+ showCommand { command = "nix"; section = "#"; def = command; }
showCommand { command = "nix"; section = "#"; def = command; }

View file

@ -13,7 +13,12 @@ concatStrings (map
then "*empty*"
else if isBool option.value
then (if option.value then "`true`" else "`false`")
else "`" + toString option.value + "`") + "\n\n"
else
# n.b. a StringMap value type is specified as a string, but
# this shows the value type. The empty stringmap is "null" in
# JSON, but that converts to "{ }" here.
(if isAttrs option.value then "`\"\"`"
else "`" + toString option.value + "`")) + "\n\n"
+ (if option.aliases != []
then " **Deprecated alias:** " + (concatStringsSep ", " (map (s: "`${s}`") option.aliases)) + "\n\n"
else "")

View file

@ -18,13 +18,22 @@ dist-files += $(man-pages)
nix-eval = $(bindir)/nix eval --experimental-features nix-command -I nix/corepkgs=corepkgs --store dummy:// --impure --raw --expr
$(d)/%.1: $(d)/src/command-ref/%.md
$(trace-gen) lowdown -sT man $^ -o $@
@printf "Title: %s\n\n" "$$(basename $@ .1)" > $^.tmp
@cat $^ >> $^.tmp
$(trace-gen) lowdown -sT man $^.tmp -o $@
@rm $^.tmp
$(d)/%.8: $(d)/src/command-ref/%.md
$(trace-gen) lowdown -sT man $^ -o $@
@printf "Title: %s\n\n" "$$(basename $@ .8)" > $^.tmp
@cat $^ >> $^.tmp
$(trace-gen) lowdown -sT man $^.tmp -o $@
@rm $^.tmp
$(d)/nix.conf.5: $(d)/src/command-ref/conf-file.md
$(trace-gen) lowdown -sT man $^ -o $@
@printf "Title: %s\n\n" "$$(basename $@ .5)" > $^.tmp
@cat $^ >> $^.tmp
$(trace-gen) lowdown -sT man $^.tmp -o $@
@rm $^.tmp
$(d)/src/command-ref/nix.md: $(d)/nix.json $(d)/generate-manpage.nix $(bindir)/nix
$(trace-gen) $(nix-eval) 'import doc/manual/generate-manpage.nix (builtins.fromJSON (builtins.readFile $<))' > $@.tmp
@ -40,7 +49,7 @@ $(d)/nix.json: $(bindir)/nix
@mv $@.tmp $@
$(d)/conf-file.json: $(bindir)/nix
$(trace-gen) env -i NIX_CONF_DIR=/dummy HOME=/dummy $(bindir)/nix show-config --json --experimental-features nix-command > $@.tmp
$(trace-gen) env -i NIX_CONF_DIR=/dummy HOME=/dummy NIX_SSL_CERT_FILE=/dummy/no-ca-bundle.crt $(bindir)/nix show-config --json --experimental-features nix-command > $@.tmp
@mv $@.tmp $@
$(d)/src/expressions/builtins.md: $(d)/builtins.json $(d)/generate-builtins.nix $(d)/src/expressions/builtins-prefix.md $(bindir)/nix

View file

@ -1,5 +1,3 @@
Title: nix.conf
# Name
`nix.conf` - Nix configuration file

View file

@ -1,5 +1,3 @@
Title: nix-build
# Name
`nix-build` - build a Nix expression

View file

@ -1,5 +1,3 @@
Title: nix-channel
# Name
`nix-channel` - manage Nix channels

View file

@ -1,5 +1,3 @@
Title: nix-collect-garbage
# Name
`nix-collect-garbage` - delete unreachable store paths

View file

@ -1,5 +1,3 @@
Title: nix-copy-closure
# Name
`nix-copy-closure` - copy a closure to or from a remote machine via SSH

View file

@ -1,5 +1,3 @@
Title: nix-daemon
# Name
`nix-daemon` - Nix multi-user support daemon

View file

@ -1,5 +1,3 @@
Title: nix-env
# Name
`nix-env` - manipulate or query Nix user environments

View file

@ -1,5 +1,3 @@
Title: nix-hash
# Name
`nix-hash` - compute the cryptographic hash of a path

View file

@ -1,5 +1,3 @@
Title: nix-instantiate
# Name
`nix-instantiate` - instantiate store derivations from Nix expressions

View file

@ -1,5 +1,3 @@
Title: nix-prefetch-url
# Name
`nix-prefetch-url` - copy a file from a URL into the store and print its hash

View file

@ -1,5 +1,3 @@
Title: nix-shell
# Name
`nix-shell` - start an interactive shell based on a Nix expression

View file

@ -1,5 +1,3 @@
Title: nix-store
# Name
`nix-store` - manipulate or query the Nix store

View file

@ -39,17 +39,17 @@ To build Nix itself in this shell:
```console
[nix-shell]$ ./bootstrap.sh
[nix-shell]$ ./configure $configureFlags --prefix=$(pwd)/inst
[nix-shell]$ ./configure $configureFlags --prefix=$(pwd)/outputs/out
[nix-shell]$ make -j $NIX_BUILD_CORES
```
To install it in `$(pwd)/inst` and test it:
To install it in `$(pwd)/outputs` and test it:
```console
[nix-shell]$ make install
[nix-shell]$ make installcheck
[nix-shell]$ ./inst/bin/nix --version
nix (Nix) 2.4
[nix-shell]$ make installcheck -j $NIX_BUILD_CORES
[nix-shell]$ ./outputs/out/bin/nix --version
nix (Nix) 3.0
```
To run a functional test:
@ -58,6 +58,12 @@ To run a functional test:
make tests/test-name-should-auto-complete.sh.test
```
To run the unit-tests for C++ code:
```
make check
```
If you have a flakes-enabled Nix you can replace:
```console

View file

@ -58,6 +58,7 @@
configureFlags =
lib.optionals stdenv.isLinux [
"--with-sandbox-shell=${sh}/bin/busybox"
"LDFLAGS=-fuse-ld=gold"
];
buildDeps =

View file

@ -11,6 +11,6 @@ GLOBAL_CXXFLAGS += -Wno-deprecated-declarations
$(foreach i, config.h $(wildcard src/lib*/*.hh), \
$(eval $(call install-file-in, $(i), $(includedir)/nix, 0644)))
$(GCH) $(PCH): src/libutil/util.hh config.h
$(GCH): src/libutil/util.hh config.h
GCH_CXXFLAGS = -I src/libutil

View file

@ -4,13 +4,14 @@ function _complete_nix {
_get_comp_words_by_ref -n ':=&' words cword cur
local have_type
while IFS= read -r line; do
local completion=${line%% *}
if [[ -z $have_type ]]; then
have_type=1
if [[ $line = filenames ]]; then
if [[ $completion = filenames ]]; then
compopt -o filenames
fi
else
COMPREPLY+=("$line")
COMPREPLY+=("$completion")
fi
done < <(NIX_GET_COMPLETIONS=$cword "${words[@]}")
__ltrim_colon_completions "$cur"

21
misc/zsh/completion.zsh Normal file
View file

@ -0,0 +1,21 @@
function _nix() {
local ifs_bk="$IFS"
local input=("${(Q)words[@]}")
IFS=$'\n'
local res=($(NIX_GET_COMPLETIONS=$((CURRENT - 1)) "$input[@]"))
IFS="$ifs_bk"
local tpe="${${res[1]}%%> *}"
local -a suggestions
declare -a suggestions
for suggestion in ${res:1}; do
# FIXME: This doesn't work properly if the suggestion word contains a `:`
# itself
suggestions+="${suggestion/ /:}"
done
if [[ "$tpe" == filenames ]]; then
compadd -f
fi
_describe 'nix' suggestions
}
compdef _nix nix

View file

@ -10,33 +10,12 @@ $(GCH): precompiled-headers.h
@mkdir -p "$(dir $@)"
$(trace-gen) $(CXX) -x c++-header -o $@ $< $(GLOBAL_CXXFLAGS) $(GCH_CXXFLAGS)
PCH = $(buildprefix)precompiled-headers.h.pch
$(PCH): precompiled-headers.h
@rm -f $@
@mkdir -p "$(dir $@)"
$(trace-gen) $(CXX) -x c++-header -o $@ $< $(GLOBAL_CXXFLAGS) $(GCH_CXXFLAGS)
clean-files += $(GCH) $(PCH)
clean-files += $(GCH)
ifeq ($(PRECOMPILE_HEADERS), 1)
ifeq ($(findstring g++,$(CXX)), g++)
GLOBAL_CXXFLAGS_PCH += -include $(buildprefix)precompiled-headers.h -Winvalid-pch
GLOBAL_ORDER_AFTER += $(GCH)
else ifeq ($(findstring clang++,$(CXX)), clang++)
GLOBAL_CXXFLAGS_PCH += -include-pch $(PCH) -Winvalid-pch
GLOBAL_ORDER_AFTER += $(PCH)
else
$(error Don't know how to precompile headers on $(CXX))
endif
endif

View file

@ -1,3 +1,4 @@
#[allow(improper_ctypes_definitions)]
#[cfg(not(test))]
mod c;
mod error;

View file

@ -19,9 +19,9 @@ impl StorePath {
}
Self::new_from_base_name(
path.file_name()
.ok_or(Error::BadStorePath(path.into()))?
.ok_or_else(|| Error::BadStorePath(path.into()))?
.to_str()
.ok_or(Error::BadStorePath(path.into()))?,
.ok_or_else(|| Error::BadStorePath(path.into()))?,
)
}
@ -34,7 +34,7 @@ impl StorePath {
pub fn new_from_base_name(base_name: &str) -> Result<Self, Error> {
if base_name.len() < STORE_PATH_HASH_CHARS + 1
|| base_name.as_bytes()[STORE_PATH_HASH_CHARS] != '-' as u8
|| base_name.as_bytes()[STORE_PATH_HASH_CHARS] != b'-'
{
return Err(Error::BadStorePath(base_name.into()));
}
@ -65,7 +65,7 @@ impl StorePathHash {
Ok(Self(bytes))
}
pub fn hash<'a>(&'a self) -> &'a [u8; STORE_PATH_HASH_BYTES] {
pub fn hash(&self) -> &[u8; STORE_PATH_HASH_BYTES] {
&self.0
}
}
@ -98,7 +98,7 @@ pub struct StorePathName(String);
impl StorePathName {
pub fn new(s: &str) -> Result<Self, Error> {
if s.len() == 0 {
if s.is_empty() {
return Err(Error::StorePathNameEmpty);
}
@ -106,8 +106,7 @@ impl StorePathName {
return Err(Error::StorePathNameTooLong);
}
if s.starts_with('.')
|| !s.chars().all(|c| {
let is_good_path_name = s.chars().all(|c| {
c.is_ascii_alphabetic()
|| c.is_ascii_digit()
|| c == '+'
@ -116,15 +115,15 @@ impl StorePathName {
|| c == '_'
|| c == '?'
|| c == '='
})
{
});
if s.starts_with('.') || !is_good_path_name {
return Err(Error::BadStorePathName);
}
Ok(Self(s.to_string()))
}
pub fn name<'a>(&'a self) -> &'a str {
pub fn name(&self) -> &str {
&self.0
}
}

View file

@ -13,7 +13,7 @@ pub fn decoded_len(input_len: usize) -> usize {
input_len * 5 / 8
}
static BASE32_CHARS: &'static [u8; 32] = &b"0123456789abcdfghijklmnpqrsvwxyz";
static BASE32_CHARS: &[u8; 32] = &b"0123456789abcdfghijklmnpqrsvwxyz";
lazy_static! {
static ref BASE32_CHARS_REVERSE: Box<[u8; 256]> = {

View file

@ -2,6 +2,8 @@
set -e
umask 0022
dest="/nix"
self="$(dirname "$0")"
nix="@nix@"

View file

@ -10,6 +10,8 @@ oops() {
exit 1
}
umask 0022
tmpDir="$(mktemp -d -t nix-binary-tarball-unpack.XXXXXXXXXX || \
oops "Can't create temporary directory for downloading the Nix binary tarball")"
cleanup() {

View file

@ -44,7 +44,7 @@ static bool allSupportedLocally(Store & store, const std::set<std::string>& requ
return true;
}
static int _main(int argc, char * * argv)
static int main_build_remote(int argc, char * * argv)
{
{
logger = makeJSONLogger(*logger);
@ -305,4 +305,4 @@ connected:
}
}
static RegisterLegacyCommand s1("build-remote", _main);
static RegisterLegacyCommand r_build_remote("build-remote", main_build_remote);

View file

@ -11,7 +11,7 @@ namespace nix::eval_cache {
MakeError(CachedEvalError, EvalError);
class AttrDb;
struct AttrDb;
class AttrCursor;
class EvalCache : public std::enable_shared_from_this<EvalCache>

View file

@ -2081,7 +2081,7 @@ Strings EvalSettings::getDefaultNixPath()
EvalSettings evalSettings;
static GlobalConfig::Register r1(&evalSettings);
static GlobalConfig::Register rEvalSettings(&evalSettings);
}

View file

@ -12,7 +12,7 @@ using namespace flake;
namespace flake {
typedef std::pair<Tree, FlakeRef> FetchedFlake;
typedef std::pair<fetchers::Tree, FlakeRef> FetchedFlake;
typedef std::vector<std::pair<FlakeRef, FetchedFlake>> FlakeCache;
static std::optional<FetchedFlake> lookupInFlakeCache(
@ -48,17 +48,17 @@ static std::tuple<fetchers::Tree, FlakeRef, FlakeRef> fetchOrSubstituteTree(
resolvedRef = originalRef.resolve(state.store);
auto fetchedResolved = lookupInFlakeCache(flakeCache, originalRef);
if (!fetchedResolved) fetchedResolved.emplace(resolvedRef.fetchTree(state.store));
flakeCache.push_back({resolvedRef, fetchedResolved.value()});
fetched.emplace(fetchedResolved.value());
flakeCache.push_back({resolvedRef, *fetchedResolved});
fetched.emplace(*fetchedResolved);
}
else {
throw Error("'%s' is an indirect flake reference, but registry lookups are not allowed", originalRef);
}
}
flakeCache.push_back({originalRef, fetched.value()});
flakeCache.push_back({originalRef, *fetched});
}
auto [tree, lockedRef] = fetched.value();
auto [tree, lockedRef] = *fetched;
debug("got tree '%s' from '%s'",
state.store->printStorePath(tree.storePath), lockedRef);
@ -215,10 +215,9 @@ static Flake getFlake(
if (auto outputs = vInfo.attrs->get(sOutputs)) {
expectType(state, tLambda, *outputs->value, *outputs->pos);
flake.vOutputs = allocRootValue(outputs->value);
if ((*flake.vOutputs)->lambda.fun->matchAttrs) {
for (auto & formal : (*flake.vOutputs)->lambda.fun->formals->formals) {
if (outputs->value->lambda.fun->matchAttrs) {
for (auto & formal : outputs->value->lambda.fun->formals->formals) {
if (formal.name != state.sSelf)
flake.inputs.emplace(formal.name, FlakeInput {
.ref = parseFlakeRef(formal.name)
@ -248,7 +247,7 @@ Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool allowLookup
}
/* Compute an in-memory lock file for the specified top-level flake,
and optionally write it to file, it the flake is writable. */
and optionally write it to file, if the flake is writable. */
LockedFlake lockFlake(
EvalState & state,
const FlakeRef & topRef,
@ -367,7 +366,7 @@ LockedFlake lockFlake(
/* If we have an --update-input flag for an input
of this input, then we must fetch the flake to
to update it. */
update it. */
auto lb = lockFlags.inputUpdates.lower_bound(inputPath);
auto hasChildUpdate =

View file

@ -34,7 +34,6 @@ struct Flake
std::optional<std::string> description;
std::shared_ptr<const fetchers::Tree> sourceInfo;
FlakeInputs inputs;
RootValue vOutputs;
~Flake();
};

View file

@ -16,10 +16,10 @@ const static std::string subDirRegex = subDirElemRegex + "(?:/" + subDirElemRege
std::string FlakeRef::to_string() const
{
auto url = input.toURL();
std::map<std::string, std::string> extraQuery;
if (subdir != "")
url.query.insert_or_assign("dir", subdir);
return url.to_string();
extraQuery.insert_or_assign("dir", subdir);
return input.toURLString(extraQuery);
}
fetchers::Attrs FlakeRef::toAttrs() const
@ -157,7 +157,8 @@ std::pair<FlakeRef, std::string> parseFlakeRefWithFragment(
} else {
if (!hasPrefix(path, "/"))
throw BadURL("flake reference '%s' is not an absolute path", url);
path = canonPath(path);
auto query = decodeQuery(match[2]);
path = canonPath(path + "/" + get(query, "dir").value_or(""));
}
fetchers::Attrs attrs;

View file

@ -13,12 +13,12 @@ FlakeRef getFlakeRef(
{
auto i = json.find(attr);
if (i != json.end()) {
auto attrs = jsonToAttrs(*i);
auto attrs = fetchers::jsonToAttrs(*i);
// FIXME: remove when we drop support for version 5.
if (info) {
auto j = json.find(info);
if (j != json.end()) {
for (auto k : jsonToAttrs(*j))
for (auto k : fetchers::jsonToAttrs(*j))
attrs.insert_or_assign(k.first, k.second);
}
}

View file

@ -11,8 +11,6 @@ class StorePath;
namespace nix::flake {
using namespace fetchers;
typedef std::vector<FlakeId> InputPath;
struct LockedNode;

View file

@ -115,6 +115,14 @@ public:
{
return handle_value<void(Value&, const char*)>(mkString, val.c_str());
}
#if NLOHMANN_JSON_VERSION_MAJOR >= 3 && NLOHMANN_JSON_VERSION_MINOR >= 8
bool binary(binary_t&)
{
// This function ought to be unreachable
assert(false);
return true;
}
#endif
bool start_object(std::size_t len)
{

View file

@ -614,8 +614,7 @@ Path resolveExprPath(Path path)
// Basic cycle/depth limit to avoid infinite loops.
if (++followCount >= maxFollow)
throw Error("too many symbolic links encountered while traversing the path '%s'", path);
if (lstat(path.c_str(), &st))
throw SysError("getting status of '%s'", path);
st = lstat(path);
if (!S_ISLNK(st.st_mode)) break;
path = absPath(readLink(path), dirOf(path));
}
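
The change above swaps a manual `lstat(2)` call plus error check for a throwing wrapper (`st = lstat(path)`); the same substitution appears again in the `build.cc` hunks further down. Below is a minimal standalone sketch of that pattern, assuming only POSIX; `lstatOrThrow` is an illustrative name, not the project's actual helper (which throws `SysError`).

```cpp
#include <cerrno>
#include <string>
#include <system_error>
#include <sys/stat.h>

// Illustrative throwing wrapper: return the stat result or raise an
// exception carrying errno, so callers don't repeat the error check.
static struct stat lstatOrThrow(const std::string & path)
{
    struct stat st;
    if (lstat(path.c_str(), &st) == -1)
        throw std::system_error(errno, std::generic_category(),
            "getting status of '" + path + "'");
    return st;
}

int main()
{
    return S_ISLNK(lstatOrThrow("/tmp").st_mode) ? 1 : 0;
}
```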

View file

@ -2236,6 +2236,10 @@ static RegisterPrimOp primop_catAttrs({
static void prim_functionArgs(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
state.forceValue(*args[0], pos);
if (args[0]->type == tPrimOpApp || args[0]->type == tPrimOp) {
state.mkAttrs(v, 0);
return;
}
if (args[0]->type != tLambda)
throw TypeError({
.hint = hintfmt("'functionArgs' requires a function"),

View file

@ -1,3 +1,5 @@
#pragma once
#include "eval.hh"
#include <tuple>

View file

@ -11,7 +11,7 @@ static void prim_unsafeDiscardStringContext(EvalState & state, const Pos & pos,
mkString(v, s, PathSet());
}
static RegisterPrimOp r1("__unsafeDiscardStringContext", 1, prim_unsafeDiscardStringContext);
static RegisterPrimOp primop_unsafeDiscardStringContext("__unsafeDiscardStringContext", 1, prim_unsafeDiscardStringContext);
static void prim_hasContext(EvalState & state, const Pos & pos, Value * * args, Value & v)
@ -21,7 +21,7 @@ static void prim_hasContext(EvalState & state, const Pos & pos, Value * * args,
mkBool(v, !context.empty());
}
static RegisterPrimOp r2("__hasContext", 1, prim_hasContext);
static RegisterPrimOp primop_hasContext("__hasContext", 1, prim_hasContext);
/* Sometimes we want to pass a derivation path (i.e. pkg.drvPath) to a
@ -42,7 +42,7 @@ static void prim_unsafeDiscardOutputDependency(EvalState & state, const Pos & po
mkString(v, s, context2);
}
static RegisterPrimOp r3("__unsafeDiscardOutputDependency", 1, prim_unsafeDiscardOutputDependency);
static RegisterPrimOp primop_unsafeDiscardOutputDependency("__unsafeDiscardOutputDependency", 1, prim_unsafeDiscardOutputDependency);
/* Extract the context of a string as a structured Nix value.
@ -127,7 +127,7 @@ static void prim_getContext(EvalState & state, const Pos & pos, Value * * args,
v.attrs->sort();
}
static RegisterPrimOp r4("__getContext", 1, prim_getContext);
static RegisterPrimOp primop_getContext("__getContext", 1, prim_getContext);
/* Append the given context to a given string.
@ -191,6 +191,6 @@ static void prim_appendContext(EvalState & state, const Pos & pos, Value * * arg
mkString(v, orig, context);
}
static RegisterPrimOp r5("__appendContext", 2, prim_appendContext);
static RegisterPrimOp primop_appendContext("__appendContext", 2, prim_appendContext);
}

View file

@ -87,6 +87,6 @@ static void prim_fetchMercurial(EvalState & state, const Pos & pos, Value * * ar
state.allowedPaths->insert(tree.actualPath);
}
static RegisterPrimOp r("fetchMercurial", 1, prim_fetchMercurial);
static RegisterPrimOp r_fetchMercurial("fetchMercurial", 1, prim_fetchMercurial);
}

View file

@ -152,7 +152,7 @@ static void prim_fetchTree(EvalState & state, const Pos & pos, Value * * args, V
fetchTree(state, pos, args, v, std::nullopt);
}
static RegisterPrimOp r("fetchTree", 1, prim_fetchTree);
static RegisterPrimOp primop_fetchTree("fetchTree", 1, prim_fetchTree);
static void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v,
const string & who, bool unpack, std::string name)

View file

@ -88,6 +88,6 @@ static void prim_fromTOML(EvalState & state, const Pos & pos, Value * * args, Va
}
}
static RegisterPrimOp r("fromTOML", 1, prim_fromTOML);
static RegisterPrimOp primop_fromTOML("fromTOML", 1, prim_fromTOML);
}

View file

@ -69,6 +69,14 @@ ParsedURL Input::toURL() const
return scheme->toURL(*this);
}
std::string Input::toURLString(const std::map<std::string, std::string> & extraQuery) const
{
auto url = toURL();
for (auto & attr : extraQuery)
url.query.insert(attr);
return url.to_string();
}
std::string Input::to_string() const
{
return toURL().to_string();

View file

@ -39,6 +39,8 @@ public:
ParsedURL toURL() const;
std::string toURLString(const std::map<std::string, std::string> & extraQuery = {}) const;
std::string to_string() const;
Attrs toAttrs() const;
@ -73,7 +75,7 @@ public:
StorePath computeStorePath(Store & store) const;
// Convience functions for common attributes.
// Convenience functions for common attributes.
std::string getType() const;
std::optional<Hash> getNarHash() const;
std::optional<std::string> getRef() const;
@ -84,6 +86,9 @@ public:
struct InputScheme
{
virtual ~InputScheme()
{ }
virtual std::optional<Input> inputFromURL(const ParsedURL & url) = 0;
virtual std::optional<Input> inputFromAttrs(const Attrs & attrs) = 0;
@ -119,12 +124,14 @@ DownloadFileResult downloadFile(
ref<Store> store,
const std::string & url,
const std::string & name,
bool immutable);
bool immutable,
const Headers & headers = {});
std::pair<Tree, time_t> downloadTarball(
ref<Store> store,
const std::string & url,
const std::string & name,
bool immutable);
bool immutable,
const Headers & headers = {});
}

View file

@ -452,6 +452,6 @@ struct GitInputScheme : InputScheme
}
};
static auto r1 = OnStartup([] { registerInputScheme(std::make_unique<GitInputScheme>()); });
static auto rGitInputScheme = OnStartup([] { registerInputScheme(std::make_unique<GitInputScheme>()); });
}

View file

@ -3,12 +3,20 @@
#include "fetchers.hh"
#include "globals.hh"
#include "store-api.hh"
#include "types.hh"
#include "url-parts.hh"
#include <optional>
#include <nlohmann/json.hpp>
namespace nix::fetchers {
struct DownloadUrl
{
std::string url;
Headers headers;
};
// A github or gitlab host
const static std::string hostRegexS = "[a-zA-Z0-9.]*"; // FIXME: check
std::regex hostRegex(hostRegexS, std::regex::ECMAScript);
@ -17,6 +25,8 @@ struct GitArchiveInputScheme : InputScheme
{
virtual std::string type() = 0;
virtual std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const = 0;
std::optional<Input> inputFromURL(const ParsedURL & url) override
{
if (url.scheme != type()) return {};
@ -130,9 +140,31 @@ struct GitArchiveInputScheme : InputScheme
return input;
}
std::optional<std::string> getAccessToken(const std::string & host) const
{
auto tokens = settings.accessTokens.get();
if (auto token = get(tokens, host))
return *token;
return {};
}
Headers makeHeadersWithAuthTokens(const std::string & host) const
{
Headers headers;
auto accessToken = getAccessToken(host);
if (accessToken) {
auto hdr = accessHeaderFromToken(*accessToken);
if (hdr)
headers.push_back(*hdr);
else
warn("Unrecognized access token for host '%s'", host);
}
return headers;
}
virtual Hash getRevFromRef(nix::ref<Store> store, const Input & input) const = 0;
virtual std::string getDownloadUrl(const Input & input) const = 0;
virtual DownloadUrl getDownloadUrl(const Input & input) const = 0;
std::pair<Tree, Input> fetch(ref<Store> store, const Input & _input) override
{
@ -161,7 +193,7 @@ struct GitArchiveInputScheme : InputScheme
auto url = getDownloadUrl(input);
auto [tree, lastModified] = downloadTarball(store, url, "source", true);
auto [tree, lastModified] = downloadTarball(store, url.url, "source", true, url.headers);
input.attrs.insert_or_assign("lastModified", lastModified);
@ -183,49 +215,52 @@ struct GitHubInputScheme : GitArchiveInputScheme
{
std::string type() override { return "github"; }
void addAccessToken(std::string & url) const
std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const override
{
std::string accessToken = settings.githubAccessToken.get();
if (accessToken != "")
url += "?access_token=" + accessToken;
// Github supports PAT/OAuth2 tokens and HTTP Basic
// Authentication. The former simply specifies the token, the
// latter can use the token as the password. Only the first
// is used here. See
// https://developer.github.com/v3/#authentication and
// https://docs.github.com/en/developers/apps/authorizing-oath-apps
return std::pair<std::string, std::string>("Authorization", fmt("token %s", token));
}
Hash getRevFromRef(nix::ref<Store> store, const Input & input) const override
{
auto host_url = maybeGetStrAttr(input.attrs, "url").value_or("github.com");
auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com");
auto url = fmt("https://api.%s/repos/%s/%s/commits/%s", // FIXME: check
host_url, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), *input.getRef());
host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), *input.getRef());
addAccessToken(url);
Headers headers = makeHeadersWithAuthTokens(host);
auto json = nlohmann::json::parse(
readFile(
store->toRealPath(
downloadFile(store, url, "source", false).storePath)));
downloadFile(store, url, "source", false, headers).storePath)));
auto rev = Hash::parseAny(std::string { json["sha"] }, htSHA1);
debug("HEAD revision for '%s' is %s", url, rev.gitRev());
return rev;
}
std::string getDownloadUrl(const Input & input) const override
DownloadUrl getDownloadUrl(const Input & input) const override
{
// FIXME: use regular /archive URLs instead? api.github.com
// might have stricter rate limits.
auto host_url = maybeGetStrAttr(input.attrs, "host").value_or("github.com");
auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com");
auto url = fmt("https://api.%s/repos/%s/%s/tarball/%s", // FIXME: check if this is correct for self hosted instances
host_url, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"),
host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"),
input.getRev()->to_string(Base16, false));
addAccessToken(url);
return url;
Headers headers = makeHeadersWithAuthTokens(host);
return DownloadUrl { url, headers };
}
void clone(const Input & input, const Path & destDir) override
{
auto host_url = maybeGetStrAttr(input.attrs, "url").value_or("github.com");
auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com");
Input::fromURL(fmt("git+ssh://git@%s/%s/%s.git",
host_url, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo")))
host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo")))
.applyOverrides(input.getRef().value_or("HEAD"), input.getRev())
.clone(destDir);
}
@ -235,48 +270,71 @@ struct GitLabInputScheme : GitArchiveInputScheme
{
std::string type() override { return "gitlab"; }
std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const override
{
// Gitlab supports 4 kinds of authorization, two of which are
// relevant here: OAuth2 and PAT (Private Access Token). The
// user can indicate which token is used by specifying the
// token as <TYPE>:<VALUE>, where type is "OAuth2" or "PAT".
// If the <TYPE> is unrecognized, this will fall back to
// treating this simply as <HDRNAME>:<HDRVAL>. See
// https://docs.gitlab.com/12.10/ee/api/README.html#authentication
auto fldsplit = token.find_first_of(':');
// n.b. C++20 would allow: if (token.starts_with("OAuth2:")) ...
if ("OAuth2" == token.substr(0, fldsplit))
return std::make_pair("Authorization", fmt("Bearer %s", token.substr(fldsplit+1)));
if ("PAT" == token.substr(0, fldsplit))
return std::make_pair("Private-token", token.substr(fldsplit+1));
warn("Unrecognized GitLab token type %s", token.substr(0, fldsplit));
return std::nullopt;
}
Hash getRevFromRef(nix::ref<Store> store, const Input & input) const override
{
auto host_url = maybeGetStrAttr(input.attrs, "host").value_or("gitlab.com");
auto host = maybeGetStrAttr(input.attrs, "host").value_or("gitlab.com");
// See rate limiting note below
auto url = fmt("https://%s/api/v4/projects/%s%%2F%s/repository/commits?ref_name=%s",
host_url, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), *input.getRef());
host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), *input.getRef());
Headers headers = makeHeadersWithAuthTokens(host);
auto json = nlohmann::json::parse(
readFile(
store->toRealPath(
downloadFile(store, url, "source", false).storePath)));
downloadFile(store, url, "source", false, headers).storePath)));
auto rev = Hash::parseAny(std::string(json[0]["id"]), htSHA1);
debug("HEAD revision for '%s' is %s", url, rev.gitRev());
return rev;
}
std::string getDownloadUrl(const Input & input) const override
DownloadUrl getDownloadUrl(const Input & input) const override
{
// FIXME: This endpoint has a rate limit threshold of 5 requests per minute
auto host_url = maybeGetStrAttr(input.attrs, "url").value_or("gitlab.com");
// This endpoint has a rate limit threshold that may be
// server-specific and vary based whether the user is
// authenticated via an accessToken or not, but the usual rate
// is 10 reqs/sec/ip-addr. See
// https://docs.gitlab.com/ee/user/gitlab_com/index.html#gitlabcom-specific-rate-limits
auto host = maybeGetStrAttr(input.attrs, "host").value_or("gitlab.com");
auto url = fmt("https://%s/api/v4/projects/%s%%2F%s/repository/archive.tar.gz?sha=%s",
host_url, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"),
host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"),
input.getRev()->to_string(Base16, false));
/* # FIXME: add privat token auth (`curl --header "PRIVATE-TOKEN: <your_access_token>"`)
std::string accessToken = settings.githubAccessToken.get();
if (accessToken != "")
url += "?access_token=" + accessToken;*/
return url;
Headers headers = makeHeadersWithAuthTokens(host);
return DownloadUrl { url, headers };
}
void clone(const Input & input, const Path & destDir) override
{
auto host_url = maybeGetStrAttr(input.attrs, "url").value_or("gitlab.com");
auto host = maybeGetStrAttr(input.attrs, "host").value_or("gitlab.com");
// FIXME: get username somewhere
Input::fromURL(fmt("git+ssh://git@%s/%s/%s.git",
host_url, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo")))
host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo")))
.applyOverrides(input.getRef().value_or("HEAD"), input.getRev())
.clone(destDir);
}
};
static auto r1 = OnStartup([] { registerInputScheme(std::make_unique<GitHubInputScheme>()); });
static auto r2 = OnStartup([] { registerInputScheme(std::make_unique<GitLabInputScheme>()); });
static auto rGitHubInputScheme = OnStartup([] { registerInputScheme(std::make_unique<GitHubInputScheme>()); });
static auto rGitLabInputScheme = OnStartup([] { registerInputScheme(std::make_unique<GitLabInputScheme>()); });
}
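
The comments in this file describe two token formats: GitHub tokens are sent verbatim in an `Authorization: token ...` header, while GitLab tokens are written as `<TYPE>:<VALUE>`, where the type selects between an `Authorization: Bearer ...` header (OAuth2) and a `Private-token: ...` header (PAT). Here is a standalone sketch of that selection logic, assuming a header is just a name/value string pair as the `headers.push_back(*hdr)` usage above suggests; the function names are illustrative, not the project's API.

```cpp
#include <iostream>
#include <optional>
#include <string>
#include <utility>

using Header = std::pair<std::string, std::string>;

// GitHub-style: the token is used as-is in an "Authorization: token ..." header.
std::optional<Header> githubHeaderFromToken(const std::string & token)
{
    return Header{"Authorization", "token " + token};
}

// GitLab-style: <TYPE>:<VALUE>, where TYPE picks the header to send.
std::optional<Header> gitlabHeaderFromToken(const std::string & token)
{
    auto sep = token.find(':');
    auto type = token.substr(0, sep);
    auto value = sep == std::string::npos ? std::string{} : token.substr(sep + 1);
    if (type == "OAuth2") return Header{"Authorization", "Bearer " + value};
    if (type == "PAT")    return Header{"Private-token", value};
    return std::nullopt; // unrecognized type: the caller warns and sends no header
}

int main()
{
    if (auto h = gitlabHeaderFromToken("PAT:s3cr3t"))
        std::cout << h->first << ": " << h->second << "\n";
}
```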

View file

@ -100,6 +100,6 @@ struct IndirectInputScheme : InputScheme
}
};
static auto r1 = OnStartup([] { registerInputScheme(std::make_unique<IndirectInputScheme>()); });
static auto rIndirectInputScheme = OnStartup([] { registerInputScheme(std::make_unique<IndirectInputScheme>()); });
}

View file

@ -293,6 +293,6 @@ struct MercurialInputScheme : InputScheme
}
};
static auto r1 = OnStartup([] { registerInputScheme(std::make_unique<MercurialInputScheme>()); });
static auto rMercurialInputScheme = OnStartup([] { registerInputScheme(std::make_unique<MercurialInputScheme>()); });
}

View file

@ -102,6 +102,6 @@ struct PathInputScheme : InputScheme
}
};
static auto r1 = OnStartup([] { registerInputScheme(std::make_unique<PathInputScheme>()); });
static auto rPathInputScheme = OnStartup([] { registerInputScheme(std::make_unique<PathInputScheme>()); });
}

View file

@ -3,6 +3,7 @@
#include "util.hh"
#include "globals.hh"
#include "store-api.hh"
#include "local-fs-store.hh"
#include <nlohmann/json.hpp>

View file

@ -5,6 +5,7 @@
#include "store-api.hh"
#include "archive.hh"
#include "tarfile.hh"
#include "types.hh"
namespace nix::fetchers {
@ -12,7 +13,8 @@ DownloadFileResult downloadFile(
ref<Store> store,
const std::string & url,
const std::string & name,
bool immutable)
bool immutable,
const Headers & headers)
{
// FIXME: check store
@ -37,6 +39,7 @@ DownloadFileResult downloadFile(
return useCached();
FileTransferRequest request(url);
request.headers = headers;
if (cached)
request.expectedETag = getStrAttr(cached->infoAttrs, "etag");
FileTransferResult res;
@ -111,7 +114,8 @@ std::pair<Tree, time_t> downloadTarball(
ref<Store> store,
const std::string & url,
const std::string & name,
bool immutable)
bool immutable,
const Headers & headers)
{
Attrs inAttrs({
{"type", "tarball"},
@ -127,7 +131,7 @@ std::pair<Tree, time_t> downloadTarball(
getIntAttr(cached->infoAttrs, "lastModified")
};
auto res = downloadFile(store, url, name, immutable);
auto res = downloadFile(store, url, name, immutable, headers);
std::optional<StorePath> unpackedStorePath;
time_t lastModified;
@ -227,6 +231,6 @@ struct TarballInputScheme : InputScheme
}
};
static auto r1 = OnStartup([] { registerInputScheme(std::make_unique<TarballInputScheme>()); });
static auto rTarballInputScheme = OnStartup([] { registerInputScheme(std::make_unique<TarballInputScheme>()); });
}

View file

@ -44,7 +44,7 @@ MixCommonArgs::MixCommonArgs(const string & programName)
globalConfig.getSettings(settings);
for (auto & s : settings)
if (hasPrefix(s.first, prefix))
completions->insert(s.first);
completions->add(s.first, s.second.description);
}
}
});

View file

@ -256,7 +256,7 @@ public:
}
else if (type == resBuildLogLine || type == resPostBuildLogLine) {
auto lastLine = trim(getS(fields, 0));
auto lastLine = chomp(getS(fields, 0));
if (!lastLine.empty()) {
auto i = state->its.find(act);
assert(i != state->its.end());

View file

@ -386,18 +386,12 @@ RunPager::~RunPager()
}
string showBytes(uint64_t bytes)
{
return (format("%.2f MiB") % (bytes / (1024.0 * 1024.0))).str();
}
PrintFreed::~PrintFreed()
{
if (show)
std::cout << format("%1% store paths deleted, %2% freed\n")
% results.paths.size()
% showBytes(results.bytesFreed);
std::cout << fmt("%d store paths deleted, %s freed\n",
results.paths.size(),
showBytes(results.bytesFreed));
}
Exit::~Exit() { }

View file

@ -142,17 +142,10 @@ struct FileSource : FdSource
}
};
void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource,
RepairFlag repair, CheckSigsFlag checkSigs)
ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
Source & narSource, RepairFlag repair, CheckSigsFlag checkSigs,
std::function<ValidPathInfo(HashResult)> mkInfo)
{
assert(info.narSize);
if (!repair && isValidPath(info.path)) {
// FIXME: copyNAR -> null sink
narSource.drain();
return;
}
auto [fdTemp, fnTemp] = createTempFile();
AutoDelete autoDelete(fnTemp);
@ -162,13 +155,15 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource
/* Read the NAR simultaneously into a CompressionSink+FileSink (to
write the compressed NAR to disk), into a HashSink (to get the
NAR hash), and into a NarAccessor (to get the NAR listing). */
HashSink fileHashSink(htSHA256);
HashSink fileHashSink { htSHA256 };
std::shared_ptr<FSAccessor> narAccessor;
HashSink narHashSink { htSHA256 };
{
FdSink fileSink(fdTemp.get());
TeeSink teeSink(fileSink, fileHashSink);
auto compressionSink = makeCompressionSink(compression, teeSink);
TeeSource teeSource(narSource, *compressionSink);
TeeSink teeSinkCompressed { fileSink, fileHashSink };
auto compressionSink = makeCompressionSink(compression, teeSinkCompressed);
TeeSink teeSinkUncompressed { *compressionSink, narHashSink };
TeeSource teeSource { narSource, teeSinkUncompressed };
narAccessor = makeNarAccessor(teeSource);
compressionSink->finish();
fileSink.flush();
@ -176,9 +171,8 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource
auto now2 = std::chrono::steady_clock::now();
auto info = mkInfo(narHashSink.finish());
auto narInfo = make_ref<NarInfo>(info);
narInfo->narSize = info.narSize;
narInfo->narHash = info.narHash;
narInfo->compression = compression;
auto [fileHash, fileSize] = fileHashSink.finish();
narInfo->fileHash = fileHash;
@ -300,6 +294,41 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource
writeNarInfo(narInfo);
stats.narInfoWrite++;
return narInfo;
}
void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource,
RepairFlag repair, CheckSigsFlag checkSigs)
{
if (!repair && isValidPath(info.path)) {
// FIXME: copyNAR -> null sink
narSource.drain();
return;
}
addToStoreCommon(narSource, repair, checkSigs, {[&](HashResult nar) {
/* FIXME reinstate these, once we can correctly do hash modulo sink as
needed. We need to throw here in case we uploaded a corrupted store path. */
// assert(info.narHash == nar.first);
// assert(info.narSize == nar.second);
return info;
}});
}
StorePath BinaryCacheStore::addToStoreFromDump(Source & dump, const string & name,
FileIngestionMethod method, HashType hashAlgo, RepairFlag repair)
{
if (method != FileIngestionMethod::Recursive || hashAlgo != htSHA256)
unsupported("addToStoreFromDump");
return addToStoreCommon(dump, repair, CheckSigs, [&](HashResult nar) {
ValidPathInfo info {
makeFixedOutputPath(method, nar.first, name),
nar.first,
};
info.narSize = nar.second;
return info;
})->path;
}
bool BinaryCacheStore::isValidPathUncached(const StorePath & storePath)
@ -367,50 +396,52 @@ void BinaryCacheStore::queryPathInfoUncached(const StorePath & storePath,
StorePath BinaryCacheStore::addToStore(const string & name, const Path & srcPath,
FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
{
// FIXME: some cut&paste from LocalStore::addToStore().
/* FIXME: Make BinaryCacheStore::addToStoreCommon support
non-recursive+sha256 so we can just use the default
implementation of this method in terms of addToStoreFromDump. */
/* Read the whole path into memory. This is not a very scalable
method for very large paths, but `copyPath' is mainly used for
small files. */
StringSink sink;
std::optional<Hash> h;
HashSink sink { hashAlgo };
if (method == FileIngestionMethod::Recursive) {
dumpPath(srcPath, sink, filter);
h = hashString(hashAlgo, *sink.s);
} else {
auto s = readFile(srcPath);
dumpString(s, sink);
h = hashString(hashAlgo, s);
readFile(srcPath, sink);
}
auto h = sink.finish().first;
auto source = sinkToSource([&](Sink & sink) {
dumpPath(srcPath, sink, filter);
});
return addToStoreCommon(*source, repair, CheckSigs, [&](HashResult nar) {
ValidPathInfo info {
makeFixedOutputPath(method, *h, name),
Hash::dummy, // Will be fixed in addToStore, which recomputes nar hash
makeFixedOutputPath(method, h, name),
nar.first,
};
auto source = StringSource { *sink.s };
addToStore(info, source, repair, CheckSigs);
return std::move(info.path);
info.narSize = nar.second;
info.ca = FixedOutputHash {
.method = method,
.hash = h,
};
return info;
})->path;
}
StorePath BinaryCacheStore::addTextToStore(const string & name, const string & s,
const StorePathSet & references, RepairFlag repair)
{
ValidPathInfo info {
computeStorePathForText(name, s, references),
Hash::dummy, // Will be fixed in addToStore, which recomputes nar hash
};
auto textHash = hashString(htSHA256, s);
auto path = makeTextPath(name, textHash, references);
if (!repair && isValidPath(path))
return path;
auto source = StringSource { s };
return addToStoreCommon(source, repair, CheckSigs, [&](HashResult nar) {
ValidPathInfo info { path, nar.first };
info.narSize = nar.second;
info.ca = TextHash { textHash };
info.references = references;
if (repair || !isValidPath(info.path)) {
StringSink sink;
dumpString(s, sink);
auto source = StringSource { *sink.s };
addToStore(info, source, repair, CheckSigs);
}
return std::move(info.path);
return info;
})->path;
}
ref<FSAccessor> BinaryCacheStore::getFSAccessor()

View file

@ -72,6 +72,10 @@ private:
void writeNarInfo(ref<NarInfo> narInfo);
ref<const ValidPathInfo> addToStoreCommon(
Source & narSource, RepairFlag repair, CheckSigsFlag checkSigs,
std::function<ValidPathInfo(HashResult)> mkInfo);
public:
bool isValidPathUncached(const StorePath & path) override;
@ -85,6 +89,9 @@ public:
void addToStore(const ValidPathInfo & info, Source & narSource,
RepairFlag repair, CheckSigsFlag checkSigs) override;
StorePath addToStoreFromDump(Source & dump, const string & name,
FileIngestionMethod method, HashType hashAlgo, RepairFlag repair) override;
StorePath addToStore(const string & name, const Path & srcPath,
FileIngestionMethod method, HashType hashAlgo,
PathFilter & filter, RepairFlag repair) override;

View file

@ -296,9 +296,21 @@ public:
~Worker();
/* Make a goal (with caching). */
GoalPtr makeDerivationGoal(const StorePath & drvPath, const StringSet & wantedOutputs, BuildMode buildMode = bmNormal);
std::shared_ptr<DerivationGoal> makeBasicDerivationGoal(const StorePath & drvPath,
const BasicDerivation & drv, BuildMode buildMode = bmNormal);
/* derivation goal */
private:
std::shared_ptr<DerivationGoal> makeDerivationGoalCommon(
const StorePath & drvPath, const StringSet & wantedOutputs,
std::function<std::shared_ptr<DerivationGoal>()> mkDrvGoal);
public:
std::shared_ptr<DerivationGoal> makeDerivationGoal(
const StorePath & drvPath,
const StringSet & wantedOutputs, BuildMode buildMode = bmNormal);
std::shared_ptr<DerivationGoal> makeBasicDerivationGoal(
const StorePath & drvPath, const BasicDerivation & drv,
const StringSet & wantedOutputs, BuildMode buildMode = bmNormal);
/* substitution goal */
GoalPtr makeSubstitutionGoal(const StorePath & storePath, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
/* Remove a dead goal. */
@ -819,6 +831,10 @@ private:
paths to the sandbox as a result of recursive Nix calls. */
AutoCloseFD sandboxMountNamespace;
/* On Linux, whether we're doing the build in its own user
namespace. */
bool usingUserNamespace = true;
/* The build hook. */
std::unique_ptr<HookInstance> hook;
@ -908,8 +924,8 @@ private:
result. */
std::map<Path, ValidPathInfo> prevInfos;
const uid_t sandboxUid = 1000;
const gid_t sandboxGid = 100;
uid_t sandboxUid() { return usingUserNamespace ? 1000 : buildUser->getUID(); }
gid_t sandboxGid() { return usingUserNamespace ? 100 : buildUser->getGID(); }
const static Path homeDir;
@ -949,10 +965,12 @@ private:
friend struct RestrictedStore;
public:
DerivationGoal(const StorePath & drvPath, const StringSet & wantedOutputs,
Worker & worker, BuildMode buildMode = bmNormal);
DerivationGoal(const StorePath & drvPath,
const StringSet & wantedOutputs, Worker & worker,
BuildMode buildMode = bmNormal);
DerivationGoal(const StorePath & drvPath, const BasicDerivation & drv,
Worker & worker, BuildMode buildMode = bmNormal);
const StringSet & wantedOutputs, Worker & worker,
BuildMode buildMode = bmNormal);
~DerivationGoal();
/* Whether we need to perform hash rewriting if there are valid output paths. */
@ -994,6 +1012,8 @@ private:
void tryLocalBuild();
void buildDone();
void resolvedFinished();
/* Is the build hook willing to perform the build? */
HookReply tryBuildHook();
@ -1085,8 +1105,8 @@ private:
const Path DerivationGoal::homeDir = "/homeless-shelter";
DerivationGoal::DerivationGoal(const StorePath & drvPath, const StringSet & wantedOutputs,
Worker & worker, BuildMode buildMode)
DerivationGoal::DerivationGoal(const StorePath & drvPath,
const StringSet & wantedOutputs, Worker & worker, BuildMode buildMode)
: Goal(worker)
, useDerivation(true)
, drvPath(drvPath)
@ -1094,7 +1114,9 @@ DerivationGoal::DerivationGoal(const StorePath & drvPath, const StringSet & want
, buildMode(buildMode)
{
state = &DerivationGoal::getDerivation;
name = fmt("building of '%s'", worker.store.printStorePath(this->drvPath));
name = fmt(
"building of '%s' from .drv file",
StorePathWithOutputs { drvPath, wantedOutputs }.to_string(worker.store));
trace("created");
mcExpectedBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.expectedBuilds);
@ -1103,15 +1125,18 @@ DerivationGoal::DerivationGoal(const StorePath & drvPath, const StringSet & want
DerivationGoal::DerivationGoal(const StorePath & drvPath, const BasicDerivation & drv,
Worker & worker, BuildMode buildMode)
const StringSet & wantedOutputs, Worker & worker, BuildMode buildMode)
: Goal(worker)
, useDerivation(false)
, drvPath(drvPath)
, wantedOutputs(wantedOutputs)
, buildMode(buildMode)
{
this->drv = std::make_unique<BasicDerivation>(BasicDerivation(drv));
state = &DerivationGoal::haveDerivation;
name = fmt("building of %s", StorePathWithOutputs { drvPath, drv.outputNames() }.to_string(worker.store));
name = fmt(
"building of '%s' from in-memory derivation",
StorePathWithOutputs { drvPath, drv.outputNames() }.to_string(worker.store));
trace("created");
mcExpectedBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.expectedBuilds);
@ -1464,8 +1489,40 @@ void DerivationGoal::inputsRealised()
/* Determine the full set of input paths. */
/* First, the input derivations. */
if (useDerivation)
for (auto & [depDrvPath, wantedDepOutputs] : dynamic_cast<Derivation *>(drv.get())->inputDrvs) {
if (useDerivation) {
auto & fullDrv = *dynamic_cast<Derivation *>(drv.get());
if (!fullDrv.inputDrvs.empty() && fullDrv.type() == DerivationType::CAFloating) {
/* We may be able to resolve this derivation based on the
now-known results of dependencies. If so, we become a stub goal
aliasing that resolved derivation goal */
std::optional attempt = fullDrv.tryResolve(worker.store);
assert(attempt);
Derivation drvResolved { *std::move(attempt) };
auto pathResolved = writeDerivation(worker.store, drvResolved);
/* Add to memotable to speed up downstream goal's queries with the
original derivation. */
drvPathResolutions.lock()->insert_or_assign(drvPath, pathResolved);
auto msg = fmt("Resolved derivation: '%s' -> '%s'",
worker.store.printStorePath(drvPath),
worker.store.printStorePath(pathResolved));
act = std::make_unique<Activity>(*logger, lvlInfo, actBuildWaiting, msg,
Logger::Fields {
worker.store.printStorePath(drvPath),
worker.store.printStorePath(pathResolved),
});
auto resolvedGoal = worker.makeDerivationGoal(
pathResolved, wantedOutputs, buildMode);
addWaitee(resolvedGoal);
state = &DerivationGoal::resolvedFinished;
return;
}
for (auto & [depDrvPath, wantedDepOutputs] : fullDrv.inputDrvs) {
/* Add the relevant output closures of the input derivation
`i' as input paths. Only add the closures of output paths
that are specified as inputs. */
@ -1485,6 +1542,7 @@ void DerivationGoal::inputsRealised()
worker.store.printStorePath(drvPath), j, worker.store.printStorePath(drvPath));
}
}
}
/* Second, the input sources. */
worker.store.computeFSClosure(drv->inputSrcs, inputPaths);
@ -1612,6 +1670,13 @@ void DerivationGoal::tryToBuild()
actLock.reset();
state = &DerivationGoal::tryLocalBuild;
worker.wakeUp(shared_from_this());
}
void DerivationGoal::tryLocalBuild() {
bool buildLocally = buildMode != bmNormal || parsedDrv->willBuildLocally(worker.store);
/* Make sure that we are allowed to start a build. If this
derivation prefers to be done locally, do it even if
maxBuildJobs is 0. */
@ -1622,12 +1687,6 @@ void DerivationGoal::tryToBuild()
return;
}
state = &DerivationGoal::tryLocalBuild;
worker.wakeUp(shared_from_this());
}
void DerivationGoal::tryLocalBuild() {
/* If `build-users-group' is not empty, then we have to build as
one of the members of that group. */
if (settings.buildUsersGroup != "" && getuid() == 0) {
@ -1675,7 +1734,34 @@ void DerivationGoal::tryLocalBuild() {
}
void replaceValidPath(const Path & storePath, const Path tmpPath)
static void chmod_(const Path & path, mode_t mode)
{
if (chmod(path.c_str(), mode) == -1)
throw SysError("setting permissions on '%s'", path);
}
/* Move/rename path 'src' to 'dst'. Temporarily make 'src' writable if
it's a directory and we're not root (to be able to update the
directory's parent link ".."). */
static void movePath(const Path & src, const Path & dst)
{
auto st = lstat(src);
bool changePerm = (geteuid() && S_ISDIR(st.st_mode) && !(st.st_mode & S_IWUSR));
if (changePerm)
chmod_(src, st.st_mode | S_IWUSR);
if (rename(src.c_str(), dst.c_str()))
throw SysError("renaming '%1%' to '%2%'", src, dst);
if (changePerm)
chmod_(dst, st.st_mode);
}
void replaceValidPath(const Path & storePath, const Path & tmpPath)
{
/* We can't atomically replace storePath (the original) with
tmpPath (the replacement), so we have to move it out of the
@ -1683,11 +1769,20 @@ void replaceValidPath(const Path & storePath, const Path tmpPath)
we're repairing (say) Glibc, we end up with a broken system. */
Path oldPath = (format("%1%.old-%2%-%3%") % storePath % getpid() % random()).str();
if (pathExists(storePath))
rename(storePath.c_str(), oldPath.c_str());
if (rename(tmpPath.c_str(), storePath.c_str()) == -1) {
rename(oldPath.c_str(), storePath.c_str()); // attempt to recover
throw SysError("moving '%s' to '%s'", tmpPath, storePath);
movePath(storePath, oldPath);
try {
movePath(tmpPath, storePath);
} catch (...) {
try {
// attempt to recover
movePath(oldPath, storePath);
} catch (...) {
ignoreException();
}
throw;
}
deletePath(oldPath);
}
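
The new `movePath` temporarily adds owner-write permission when a non-root user moves a directory (per the comment above, so the directory's `..` entry can be updated), and `replaceValidPath` now rolls back by moving the old path back if the second move throws, swallowing any error from the rollback itself. A compact standalone sketch of that move-with-rollback shape; `movePathLike`, `replaceLike`, and the paths are illustrative only.

```cpp
#include <cstdio>       // std::rename
#include <stdexcept>
#include <string>
#include <sys/stat.h>
#include <unistd.h>     // geteuid

// Move src to dst, temporarily making a non-writable directory writable
// (needed for non-root renames, as the comment in the hunk above explains).
static void movePathLike(const std::string & src, const std::string & dst)
{
    struct stat st;
    if (lstat(src.c_str(), &st) == -1)
        throw std::runtime_error("lstat failed on " + src);
    bool changePerm = geteuid() && S_ISDIR(st.st_mode) && !(st.st_mode & S_IWUSR);
    if (changePerm) chmod(src.c_str(), st.st_mode | S_IWUSR);
    if (std::rename(src.c_str(), dst.c_str()) != 0)
        throw std::runtime_error("renaming " + src + " to " + dst + " failed");
    if (changePerm) chmod(dst.c_str(), st.st_mode);
}

// Replace storePath with tmpPath, restoring the original on failure.
static void replaceLike(const std::string & storePath, const std::string & tmpPath)
{
    std::string oldPath = storePath + ".old";
    movePathLike(storePath, oldPath);
    try {
        movePathLike(tmpPath, storePath);
    } catch (...) {
        try { movePathLike(oldPath, storePath); }   // attempt to recover
        catch (...) { /* ignore, keep the original error */ }
        throw;
    }
    // the real code deletes oldPath here
}

int main() { return 0; }
```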
@ -1906,6 +2001,9 @@ void DerivationGoal::buildDone()
done(BuildResult::Built);
}
void DerivationGoal::resolvedFinished() {
done(BuildResult::Built);
}
HookReply DerivationGoal::tryBuildHook()
{
@ -1976,7 +2074,7 @@ HookReply DerivationGoal::tryBuildHook()
/* Tell the hook all the inputs that have to be copied to the
remote system. */
writeStorePaths(worker.store, hook->sink, inputPaths);
worker_proto::write(worker.store, hook->sink, inputPaths);
/* Tell the hooks the missing outputs that have to be copied back
from the remote system. */
@ -1987,7 +2085,7 @@ HookReply DerivationGoal::tryBuildHook()
if (buildMode != bmCheck && status.known->isValid()) continue;
missingPaths.insert(status.known->path);
}
writeStorePaths(worker.store, hook->sink, missingPaths);
worker_proto::write(worker.store, hook->sink, missingPaths);
}
hook->sink = FdSink();
@ -2005,13 +2103,6 @@ HookReply DerivationGoal::tryBuildHook()
}
static void chmod_(const Path & path, mode_t mode)
{
if (chmod(path.c_str(), mode) == -1)
throw SysError("setting permissions on '%s'", path);
}
int childEntry(void * arg)
{
((DerivationGoal *) arg)->runChild();
@ -2268,7 +2359,8 @@ void DerivationGoal::startBuilder()
worker.store.computeFSClosure(worker.store.toStorePath(i.second.source).first, closure);
} catch (InvalidPath & e) {
} catch (Error & e) {
throw Error("while processing 'sandbox-paths': %s", e.what());
e.addTrace({}, "while processing 'sandbox-paths'");
throw;
}
for (auto & i : closure) {
auto p = worker.store.printStorePath(i);
@ -2336,15 +2428,14 @@ void DerivationGoal::startBuilder()
"root:x:0:0:Nix build user:%3%:/noshell\n"
"nixbld:x:%1%:%2%:Nix build user:%3%:/noshell\n"
"nobody:x:65534:65534:Nobody:/:/noshell\n",
sandboxUid, sandboxGid, settings.sandboxBuildDir));
sandboxUid(), sandboxGid(), settings.sandboxBuildDir));
/* Declare the build user's group so that programs get a consistent
view of the system (e.g., "id -gn"). */
writeFile(chrootRootDir + "/etc/group",
(format(
"root:x:0:\n"
fmt("root:x:0:\n"
"nixbld:!:%1%:\n"
"nogroup:x:65534:\n") % sandboxGid).str());
"nogroup:x:65534:\n", sandboxGid()));
/* Create /etc/hosts with localhost entry. */
if (!(derivationIsImpure(derivationType)))
@ -2367,10 +2458,7 @@ void DerivationGoal::startBuilder()
for (auto & i : inputPaths) {
auto p = worker.store.printStorePath(i);
Path r = worker.store.toRealPath(p);
struct stat st;
if (lstat(r.c_str(), &st))
throw SysError("getting attributes of path '%s'", p);
if (S_ISDIR(st.st_mode))
if (S_ISDIR(lstat(r).st_mode))
dirsInChroot.insert_or_assign(p, r);
else
linkOrCopy(r, chrootRootDir + p);
@ -2544,6 +2632,13 @@ void DerivationGoal::startBuilder()
options.allowVfork = false;
Path maxUserNamespaces = "/proc/sys/user/max_user_namespaces";
static bool userNamespacesEnabled =
pathExists(maxUserNamespaces)
&& trim(readFile(maxUserNamespaces)) != "0";
usingUserNamespace = userNamespacesEnabled;
Pid helper = startProcess([&]() {
/* Drop additional groups here because we can't do it
@ -2562,9 +2657,11 @@ void DerivationGoal::startBuilder()
PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
if (stack == MAP_FAILED) throw SysError("allocating stack");
int flags = CLONE_NEWUSER | CLONE_NEWPID | CLONE_NEWNS | CLONE_NEWIPC | CLONE_NEWUTS | CLONE_PARENT | SIGCHLD;
int flags = CLONE_NEWPID | CLONE_NEWNS | CLONE_NEWIPC | CLONE_NEWUTS | CLONE_PARENT | SIGCHLD;
if (privateNetwork)
flags |= CLONE_NEWNET;
if (usingUserNamespace)
flags |= CLONE_NEWUSER;
pid_t child = clone(childEntry, stack + stackSize, flags, this);
if (child == -1 && errno == EINVAL) {
@ -2573,11 +2670,12 @@ void DerivationGoal::startBuilder()
flags &= ~CLONE_NEWPID;
child = clone(childEntry, stack + stackSize, flags, this);
}
if (child == -1 && (errno == EPERM || errno == EINVAL)) {
if (usingUserNamespace && child == -1 && (errno == EPERM || errno == EINVAL)) {
/* Some distros patch Linux to not allow unprivileged
* user namespaces. If we get EPERM or EINVAL, try
* without CLONE_NEWUSER and see if that works.
*/
usingUserNamespace = false;
flags &= ~CLONE_NEWUSER;
child = clone(childEntry, stack + stackSize, flags, this);
}
@ -2588,7 +2686,8 @@ void DerivationGoal::startBuilder()
_exit(1);
if (child == -1) throw SysError("cloning builder process");
writeFull(builderOut.writeSide.get(), std::to_string(child) + "\n");
writeFull(builderOut.writeSide.get(),
fmt("%d %d\n", usingUserNamespace, child));
_exit(0);
}, options);
@ -2602,10 +2701,20 @@ void DerivationGoal::startBuilder()
userNamespaceSync.readSide = -1;
/* Close the write side to prevent runChild() from hanging
reading from this. */
Finally cleanup([&]() {
userNamespaceSync.writeSide = -1;
});
pid_t tmp;
if (!string2Int<pid_t>(readLine(builderOut.readSide.get()), tmp)) abort();
auto ss = tokenizeString<std::vector<std::string>>(readLine(builderOut.readSide.get()));
assert(ss.size() == 2);
usingUserNamespace = ss[0] == "1";
if (!string2Int<pid_t>(ss[1], tmp)) abort();
pid = tmp;
if (usingUserNamespace) {
/* Set the UID/GID mapping of the builder's user namespace
such that the sandbox user maps to the build user, or to
the calling user (if build users are disabled). */
@ -2613,12 +2722,17 @@ void DerivationGoal::startBuilder()
uid_t hostGid = buildUser ? buildUser->getGID() : getgid();
writeFile("/proc/" + std::to_string(pid) + "/uid_map",
(format("%d %d 1") % sandboxUid % hostUid).str());
fmt("%d %d 1", sandboxUid(), hostUid));
writeFile("/proc/" + std::to_string(pid) + "/setgroups", "deny");
writeFile("/proc/" + std::to_string(pid) + "/gid_map",
(format("%d %d 1") % sandboxGid % hostGid).str());
fmt("%d %d 1", sandboxGid(), hostGid));
} else {
debug("note: not using a user namespace");
if (!buildUser)
throw Error("cannot perform a sandboxed build because user namespaces are not enabled; check /proc/sys/user/max_user_namespaces");
}
/* Save the mount namespace of the child. We have to do this
*before* the child does a chroot. */
@ -2628,7 +2742,6 @@ void DerivationGoal::startBuilder()
/* Signal the builder that we've updated its user namespace. */
writeFull(userNamespaceSync.writeSide.get(), "1");
userNamespaceSync.writeSide = -1;
} else
#endif
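
The sandbox changes above decide at runtime whether to use a user namespace: the parent checks `/proc/sys/user/max_user_namespaces`, the cloned child reports `usingUserNamespace` back together with its PID, and `sandboxUid()`/`sandboxGid()` then map either to the fixed 1000/100 or to the build user's real IDs. Below is a small standalone sketch of just the detection step, assuming a Linux host; this is not the project's helper, only an illustration.

```cpp
#include <fstream>
#include <iostream>
#include <string>
#include <sys/stat.h>

// Heuristic from the hunk above: user namespaces are considered available
// when /proc/sys/user/max_user_namespaces exists and is not "0".
static bool userNamespacesEnabled()
{
    const char * knob = "/proc/sys/user/max_user_namespaces";
    struct stat st;
    if (stat(knob, &st) == -1) return false;      // kernel without the knob
    std::ifstream in(knob);
    std::string value;
    in >> value;                                  // reads the number, whitespace-trimmed
    return !value.empty() && value != "0";
}

int main()
{
    bool usingUserNamespace = userNamespacesEnabled();
    // The build then either maps uid/gid inside the namespace (1000/100)
    // or falls back to the build user's real uid/gid, as in sandboxUid().
    std::cout << (usingUserNamespace ? "user namespaces enabled\n"
                                     : "user namespaces disabled\n");
}
```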
@ -2648,11 +2761,14 @@ void DerivationGoal::startBuilder()
/* Check if setting up the build environment failed. */
while (true) {
string msg = readLine(builderOut.readSide.get());
if (string(msg, 0, 1) == "\2") break;
if (string(msg, 0, 1) == "\1") {
if (msg.size() == 1) break;
throw Error(string(msg, 1));
FdSource source(builderOut.readSide.get());
auto ex = readError(source);
ex.addTrace({}, "while setting up the build environment");
throw ex;
}
debug(msg);
debug("sandbox setup: " + msg);
}
}
@ -2950,14 +3066,6 @@ struct RestrictedStore : public LocalFSStore, public virtual RestrictedStoreConf
goal.addDependency(info.path);
}
StorePath addToStoreFromDump(Source & dump, const string & name,
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair) override
{
auto path = next->addToStoreFromDump(dump, name, method, hashAlgo, repair);
goal.addDependency(path);
return path;
}
StorePath addTextToStore(const string & name, const string & s,
const StorePathSet & references, RepairFlag repair = NoRepair) override
{
@ -3152,9 +3260,7 @@ void DerivationGoal::addDependency(const StorePath & path)
if (pathExists(target))
throw Error("store path '%s' already exists in the sandbox", worker.store.printStorePath(path));
struct stat st;
if (lstat(source.c_str(), &st))
throw SysError("getting attributes of path '%s'", source);
auto st = lstat(source);
if (S_ISDIR(st.st_mode)) {
@ -3486,9 +3592,9 @@ void DerivationGoal::runChild()
/* Switch to the sandbox uid/gid in the user namespace,
which corresponds to the build user or calling user in
the parent namespace. */
if (setgid(sandboxGid) == -1)
if (setgid(sandboxGid()) == -1)
throw SysError("setgid failed");
if (setuid(sandboxUid) == -1)
if (setuid(sandboxUid()) == -1)
throw SysError("setuid failed");
setUser = false;
@ -3706,7 +3812,7 @@ void DerivationGoal::runChild()
args.push_back(rewriteStrings(i, inputRewrites));
/* Indicate that we managed to set up the build environment. */
writeFull(STDERR_FILENO, string("\1\n"));
writeFull(STDERR_FILENO, string("\2\n"));
/* Execute the program. This should not return. */
if (drv->isBuiltin()) {
@ -3727,7 +3833,7 @@ void DerivationGoal::runChild()
throw Error("unsupported builtin function '%1%'", string(drv->builder, 8));
_exit(0);
} catch (std::exception & e) {
writeFull(STDERR_FILENO, "error: " + string(e.what()) + "\n");
writeFull(STDERR_FILENO, e.what() + std::string("\n"));
_exit(1);
}
}
@ -3736,36 +3842,16 @@ void DerivationGoal::runChild()
throw SysError("executing '%1%'", drv->builder);
} catch (std::exception & e) {
writeFull(STDERR_FILENO, "\1while setting up the build environment: " + string(e.what()) + "\n");
} catch (Error & e) {
writeFull(STDERR_FILENO, "\1\n");
FdSink sink(STDERR_FILENO);
sink << e;
sink.flush();
_exit(1);
}
}
static void moveCheckToStore(const Path & src, const Path & dst)
{
/* For the rename of directory to succeed, we must be running as root or
the directory must be made temporarily writable (to update the
directory's parent link ".."). */
struct stat st;
if (lstat(src.c_str(), &st) == -1) {
throw SysError("getting attributes of path '%1%'", src);
}
bool changePerm = (geteuid() && S_ISDIR(st.st_mode) && !(st.st_mode & S_IWUSR));
if (changePerm)
chmod_(src, st.st_mode | S_IWUSR);
if (rename(src.c_str(), dst.c_str()))
throw SysError("renaming '%1%' to '%2%'", src, dst);
if (changePerm)
chmod_(dst, st.st_mode);
}
void DerivationGoal::registerOutputs()
{
/* When using a build hook, the build hook can register the output
@ -3866,7 +3952,7 @@ void DerivationGoal::registerOutputs()
something like that. */
canonicalisePathMetaData(actualPath, buildUser ? buildUser->getUID() : -1, inodesSeen);
debug("scanning for references for output %1 in temp location '%1%'", outputName, actualPath);
debug("scanning for references for output '%s' in temp location '%s'", outputName, actualPath);
/* Pass blank Sink as we are not ready to hash data at this stage. */
NullSink blank;
@ -3921,7 +4007,6 @@ void DerivationGoal::registerOutputs()
outputRewrites[std::string { scratchPath.hashPart() }] = std::string { finalStorePath.hashPart() };
};
bool rewritten = false;
std::optional<StorePathSet> referencesOpt = std::visit(overloaded {
[&](AlreadyRegistered skippedFinalPath) -> std::optional<StorePathSet> {
finish(skippedFinalPath.path);
@ -3952,7 +4037,9 @@ void DerivationGoal::registerOutputs()
StringSource source(*sink.s);
restorePath(actualPath, source);
rewritten = true;
/* FIXME: set proper permissions in restorePath() so
we don't have to do another traversal. */
canonicalisePathMetaData(actualPath, -1, inodesSeen);
}
};
@ -4035,7 +4122,7 @@ void DerivationGoal::registerOutputs()
[&](DerivationOutputInputAddressed output) {
/* input-addressed case */
auto requiredFinalPath = output.path;
/* Preemtively add rewrite rule for final hash, as that is
/* Preemptively add rewrite rule for final hash, as that is
what the NAR hash will use rather than normalized-self references */
if (scratchPath != requiredFinalPath)
outputRewrites.insert_or_assign(
@ -4109,44 +4196,21 @@ void DerivationGoal::registerOutputs()
else. No moving needed. */
assert(newInfo.ca);
} else {
/* Temporarily add write perm so we can move, will be fixed
later. */
{
struct stat st;
auto & mode = st.st_mode;
if (lstat(actualPath.c_str(), &st))
throw SysError("getting attributes of path '%1%'", actualPath);
mode |= 0200;
/* Try to change the perms, but only if the file isn't a
symlink as symlinks permissions are mostly ignored and
calling `chmod` on it will just forward the call to the
target of the link. */
if (!S_ISLNK(st.st_mode))
if (chmod(actualPath.c_str(), mode) == -1)
throw SysError("changing mode of '%1%' to %2$o", actualPath, mode);
}
if (rename(
actualPath.c_str(),
worker.store.toRealPath(finalDestPath).c_str()) == -1)
throw SysError("moving build output '%1%' from it's temporary location to the Nix store", finalDestPath);
actualPath = worker.store.toRealPath(finalDestPath);
auto destPath = worker.store.toRealPath(finalDestPath);
movePath(actualPath, destPath);
actualPath = destPath;
}
}
/* Get rid of all weird permissions. This also checks that
all files are owned by the build user, if applicable. */
canonicalisePathMetaData(actualPath,
buildUser && !rewritten ? buildUser->getUID() : -1, inodesSeen);
if (buildMode == bmCheck) {
if (!worker.store.isValidPath(newInfo.path)) continue;
ValidPathInfo oldInfo(*worker.store.queryPathInfo(newInfo.path));
if (newInfo.narHash != oldInfo.narHash) {
worker.checkMismatch = true;
if (settings.runDiffHook || settings.keepFailed) {
Path dst = worker.store.toRealPath(finalDestPath + checkSuffix);
auto dst = worker.store.toRealPath(finalDestPath + checkSuffix);
deletePath(dst);
moveCheckToStore(actualPath, dst);
movePath(actualPath, dst);
handleDiffHook(
buildUser ? buildUser->getUID() : getuid(),
@ -4266,11 +4330,13 @@ void DerivationGoal::registerOutputs()
/* Register each output path as valid, and register the sets of
paths referenced by each of them. If there are cycles in the
outputs, this will fail. */
{
ValidPathInfos infos2;
for (auto & [outputName, newInfo] : infos) {
infos2.push_back(newInfo);
}
worker.store.registerValidPaths(infos2);
}
/* In case of a fixed-output derivation hash mismatch, throw an
exception now that we have registered the output as valid. */
@ -4282,12 +4348,21 @@ void DerivationGoal::registerOutputs()
means it's safe to link the derivation to the output hash. We must do
that for floating CA derivations, which otherwise couldn't be cached,
but it's fine to do in all cases. */
for (auto & [outputName, newInfo] : infos) {
/* FIXME: we will want to track this mapping in the DB whether or
not we have a drv file. */
if (useDerivation)
worker.store.linkDeriverToPath(drvPath, outputName, newInfo.path);
bool isCaFloating = drv->type() == DerivationType::CAFloating;
auto drvPathResolved = drvPath;
if (!useDerivation && isCaFloating) {
/* Once a floating CA derivation reaches this point, it
must already be resolved, so we don't bother trying to
downcast drv to get what would just be an empty
inputDrvs field. */
Derivation drv2 { *drv };
drvPathResolved = writeDerivation(worker.store, drv2);
}
if (useDerivation || isCaFloating)
for (auto & [outputName, newInfo] : infos)
worker.store.linkDeriverToPath(drvPathResolved, outputName, newInfo.path);
}
@ -4577,7 +4652,7 @@ void DerivationGoal::flushLine()
std::map<std::string, std::optional<StorePath>> DerivationGoal::queryPartialDerivationOutputMap()
{
if (drv->type() != DerivationType::CAFloating) {
if (!useDerivation || drv->type() != DerivationType::CAFloating) {
std::map<std::string, std::optional<StorePath>> res;
for (auto & [name, output] : drv->outputs)
res.insert_or_assign(name, output.path(worker.store, drv->name, name));
@ -4589,7 +4664,7 @@ std::map<std::string, std::optional<StorePath>> DerivationGoal::queryPartialDeri
OutputPathMap DerivationGoal::queryDerivationOutputMap()
{
if (drv->type() != DerivationType::CAFloating) {
if (!useDerivation || drv->type() != DerivationType::CAFloating) {
OutputPathMap res;
for (auto & [name, output] : drv->outputsAndOptPaths(worker.store))
res.insert_or_assign(name, *output.second);
@ -5068,35 +5143,52 @@ Worker::~Worker()
}
GoalPtr Worker::makeDerivationGoal(const StorePath & path,
const StringSet & wantedOutputs, BuildMode buildMode)
std::shared_ptr<DerivationGoal> Worker::makeDerivationGoalCommon(
const StorePath & drvPath,
const StringSet & wantedOutputs,
std::function<std::shared_ptr<DerivationGoal>()> mkDrvGoal)
{
GoalPtr goal = derivationGoals[path].lock(); // FIXME
if (!goal) {
goal = std::make_shared<DerivationGoal>(path, wantedOutputs, *this, buildMode);
derivationGoals.insert_or_assign(path, goal);
WeakGoalPtr & abstract_goal_weak = derivationGoals[drvPath];
GoalPtr abstract_goal = abstract_goal_weak.lock(); // FIXME
std::shared_ptr<DerivationGoal> goal;
if (!abstract_goal) {
goal = mkDrvGoal();
abstract_goal_weak = goal;
wakeUp(goal);
} else
(dynamic_cast<DerivationGoal *>(goal.get()))->addWantedOutputs(wantedOutputs);
} else {
goal = std::dynamic_pointer_cast<DerivationGoal>(abstract_goal);
assert(goal);
goal->addWantedOutputs(wantedOutputs);
}
return goal;
}
std::shared_ptr<DerivationGoal> Worker::makeBasicDerivationGoal(const StorePath & drvPath,
const BasicDerivation & drv, BuildMode buildMode)
std::shared_ptr<DerivationGoal> Worker::makeDerivationGoal(const StorePath & drvPath,
const StringSet & wantedOutputs, BuildMode buildMode)
{
auto goal = std::make_shared<DerivationGoal>(drvPath, drv, *this, buildMode);
wakeUp(goal);
return goal;
return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() {
return std::make_shared<DerivationGoal>(drvPath, wantedOutputs, *this, buildMode);
});
}
std::shared_ptr<DerivationGoal> Worker::makeBasicDerivationGoal(const StorePath & drvPath,
const BasicDerivation & drv, const StringSet & wantedOutputs, BuildMode buildMode)
{
return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() {
return std::make_shared<DerivationGoal>(drvPath, drv, wantedOutputs, *this, buildMode);
});
}
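The refactor above funnels both goal constructors through one cache lookup keyed on the derivation path, holding goals only via `weak_ptr` so completed goals can be destroyed while live ones are reused. A rough stand-alone sketch of that idiom, with generic names rather than the actual Worker types:

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

// Keep weak_ptrs so finished goals can be freed, but reuse a live goal
// if one already exists for the key.
template<typename Key, typename T>
std::shared_ptr<T> getOrMake(
    std::map<Key, std::weak_ptr<T>> & cache,
    const Key & key,
    std::function<std::shared_ptr<T>()> make)
{
    std::weak_ptr<T> & slot = cache[key];
    auto existing = slot.lock();
    if (existing) return existing;          // reuse the live instance
    auto fresh = make();                    // otherwise build a new one...
    slot = fresh;                           // ...and remember it weakly
    return fresh;
}

int main()
{
    std::map<std::string, std::weak_ptr<std::string>> goals;
    auto a = getOrMake<std::string, std::string>(goals, "/nix/store/foo.drv",
        [] { return std::make_shared<std::string>("goal for foo"); });
    auto b = getOrMake<std::string, std::string>(goals, "/nix/store/foo.drv",
        [] { return std::make_shared<std::string>("never built"); });
    std::cout << (a == b ? "same goal reused\n" : "bug\n");
}
```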
GoalPtr Worker::makeSubstitutionGoal(const StorePath & path, RepairFlag repair, std::optional<ContentAddress> ca)
{
GoalPtr goal = substitutionGoals[path].lock(); // FIXME
WeakGoalPtr & goal_weak = substitutionGoals[path];
GoalPtr goal = goal_weak.lock(); // FIXME
if (!goal) {
goal = std::make_shared<SubstitutionGoal>(path, *this, repair, ca);
substitutionGoals.insert_or_assign(path, goal);
goal_weak = goal;
wakeUp(goal);
}
return goal;
@ -5527,7 +5619,7 @@ BuildResult LocalStore::buildDerivation(const StorePath & drvPath, const BasicDe
BuildMode buildMode)
{
Worker worker(*this);
auto goal = worker.makeBasicDerivationGoal(drvPath, drv, buildMode);
auto goal = worker.makeBasicDerivationGoal(drvPath, drv, {}, buildMode);
BuildResult result;

View file

@ -4,11 +4,13 @@
namespace nix {
std::string FixedOutputHash::printMethodAlgo() const {
std::string FixedOutputHash::printMethodAlgo() const
{
return makeFileIngestionPrefix(method) + printHashType(hash.type);
}
std::string makeFileIngestionPrefix(const FileIngestionMethod m) {
std::string makeFileIngestionPrefix(const FileIngestionMethod m)
{
switch (m) {
case FileIngestionMethod::Flat:
return "";
@ -26,7 +28,8 @@ std::string makeFixedOutputCA(FileIngestionMethod method, const Hash & hash)
+ hash.to_string(Base32, true);
}
std::string renderContentAddress(ContentAddress ca) {
std::string renderContentAddress(ContentAddress ca)
{
return std::visit(overloaded {
[](TextHash th) {
return "text:" + th.hash.to_string(Base32, true);
@ -37,54 +40,97 @@ std::string renderContentAddress(ContentAddress ca) {
}, ca);
}
ContentAddress parseContentAddress(std::string_view rawCa) {
auto rest = rawCa;
std::string renderContentAddressMethod(ContentAddressMethod cam)
{
return std::visit(overloaded {
[](TextHashMethod &th) {
return std::string{"text:"} + printHashType(htSHA256);
},
[](FixedOutputHashMethod &fshm) {
return "fixed:" + makeFileIngestionPrefix(fshm.fileIngestionMethod) + printHashType(fshm.hashType);
}
}, cam);
}
/*
Parses content address strings up to the hash.
*/
static ContentAddressMethod parseContentAddressMethodPrefix(std::string_view & rest)
{
std::string_view wholeInput { rest };
std::string_view prefix;
{
auto optPrefix = splitPrefixTo(rest, ':');
if (!optPrefix)
throw UsageError("not a content address because it is not in the form '<prefix>:<rest>': %s", rawCa);
throw UsageError("not a content address because it is not in the form '<prefix>:<rest>': %s", wholeInput);
prefix = *optPrefix;
}
auto parseHashType_ = [&](){
auto hashTypeRaw = splitPrefixTo(rest, ':');
if (!hashTypeRaw)
throw UsageError("content address hash must be in form '<algo>:<hash>', but found: %s", rawCa);
throw UsageError("content address hash must be in form '<algo>:<hash>', but found: %s", wholeInput);
HashType hashType = parseHashType(*hashTypeRaw);
return std::move(hashType);
};
// Switch on prefix
if (prefix == "text") {
// No parsing of the method, "text" only support flat.
// No parsing of the ingestion method, "text" only supports flat.
HashType hashType = parseHashType_();
if (hashType != htSHA256)
throw Error("text content address hash should use %s, but instead uses %s",
printHashType(htSHA256), printHashType(hashType));
return TextHash {
.hash = Hash::parseNonSRIUnprefixed(rest, std::move(hashType)),
};
return TextHashMethod {};
} else if (prefix == "fixed") {
// Parse method
auto method = FileIngestionMethod::Flat;
if (splitPrefix(rest, "r:"))
method = FileIngestionMethod::Recursive;
HashType hashType = parseHashType_();
return FixedOutputHash {
.method = method,
.hash = Hash::parseNonSRIUnprefixed(rest, std::move(hashType)),
return FixedOutputHashMethod {
.fileIngestionMethod = method,
.hashType = std::move(hashType),
};
} else
throw UsageError("content address prefix '%s' is unrecognized. Recogonized prefixes are 'text' or 'fixed'", prefix);
}
ContentAddress parseContentAddress(std::string_view rawCa) {
auto rest = rawCa;
ContentAddressMethod caMethod = parseContentAddressMethodPrefix(rest);
return std::visit(
overloaded {
[&](TextHashMethod thm) {
return ContentAddress(TextHash {
.hash = Hash::parseNonSRIUnprefixed(rest, htSHA256)
});
},
[&](FixedOutputHashMethod fohMethod) {
return ContentAddress(FixedOutputHash {
.method = fohMethod.fileIngestionMethod,
.hash = Hash::parseNonSRIUnprefixed(rest, std::move(fohMethod.hashType)),
});
},
}, caMethod);
}
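To make the split concrete: `parseContentAddressMethodPrefix` consumes everything up to the hash (the `text` or `fixed` prefix, an optional `r:` for recursive ingestion, and the hash algorithm), and `parseContentAddress` then parses the remaining digest. A small illustration of the accepted shapes, with placeholder digests rather than real ones:

```cpp
#include <iostream>

int main()
{
    // Illustrative only: the textual forms the parser above accepts.
    // The digests are placeholders, not real hashes.
    const char * examples[] = {
        "text:sha256:1b4sb93wp679q4zx9k1ignby1yna3z7c4c2ri3wphylbc2dwsys0",   // TextHash
        "fixed:sha256:1b4sb93wp679q4zx9k1ignby1yna3z7c4c2ri3wphylbc2dwsys0",  // flat file ingestion
        "fixed:r:sha256:1b4sb93wp679q4zx9k1ignby1yna3z7c4c2ri3wphylbc2dwsys0" // recursive (NAR) ingestion
    };
    for (auto * s : examples)
        std::cout << s << "\n";
}
```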
ContentAddressMethod parseContentAddressMethod(std::string_view caMethod)
{
/* Keep the backing string alive for the whole call: a string_view into the
temporary would dangle as soon as this statement ends. */
std::string asPrefix = std::string { caMethod } + ":";
std::string_view asPrefixView = asPrefix;
return parseContentAddressMethodPrefix(asPrefixView);
}
std::optional<ContentAddress> parseContentAddressOpt(std::string_view rawCaOpt)
{
return rawCaOpt == "" ? std::optional<ContentAddress>() : parseContentAddress(rawCaOpt);
};
std::optional<ContentAddress> parseContentAddressOpt(std::string_view rawCaOpt) {
return rawCaOpt == "" ? std::optional<ContentAddress> {} : parseContentAddress(rawCaOpt);
};
std::string renderContentAddress(std::optional<ContentAddress> ca) {
std::string renderContentAddress(std::optional<ContentAddress> ca)
{
return ca ? renderContentAddress(*ca) : "";
}

View file

@ -55,4 +55,23 @@ std::optional<ContentAddress> parseContentAddressOpt(std::string_view rawCaOpt);
Hash getContentAddressHash(const ContentAddress & ca);
/*
We only have one way to hash text with references, so this single-value
type is only useful in std::variant.
*/
struct TextHashMethod { };
struct FixedOutputHashMethod {
FileIngestionMethod fileIngestionMethod;
HashType hashType;
};
typedef std::variant<
TextHashMethod,
FixedOutputHashMethod
> ContentAddressMethod;
ContentAddressMethod parseContentAddressMethod(std::string_view rawCaMethod);
std::string renderContentAddressMethod(ContentAddressMethod caMethod);
}

View file

@ -2,7 +2,6 @@
#include "monitor-fd.hh"
#include "worker-protocol.hh"
#include "store-api.hh"
#include "local-store.hh"
#include "finally.hh"
#include "affinity.hh"
#include "archive.hh"
@ -102,17 +101,20 @@ struct TunnelLogger : public Logger
/* stopWork() means that we're done; stop sending stderr to the
client. */
void stopWork(bool success = true, const string & msg = "", unsigned int status = 0)
void stopWork(const Error * ex = nullptr)
{
auto state(state_.lock());
state->canSendStderr = false;
if (success)
if (!ex)
to << STDERR_LAST;
else {
to << STDERR_ERROR << msg;
if (status != 0) to << status;
if (GET_PROTOCOL_MINOR(clientVersion) >= 26) {
to << STDERR_ERROR << *ex;
} else {
to << STDERR_ERROR << ex->what() << ex->status;
}
}
}
@ -240,6 +242,23 @@ struct ClientSettings
}
};
static void writeValidPathInfo(
ref<Store> store,
unsigned int clientVersion,
Sink & to,
std::shared_ptr<const ValidPathInfo> info)
{
to << (info->deriver ? store->printStorePath(*info->deriver) : "")
<< info->narHash.to_string(Base16, false);
worker_proto::write(*store, to, info->references);
to << info->registrationTime << info->narSize;
if (GET_PROTOCOL_MINOR(clientVersion) >= 16) {
to << info->ultimate
<< info->sigs
<< renderContentAddress(info->ca);
}
}
static void performOp(TunnelLogger * logger, ref<Store> store,
TrustedFlag trusted, RecursiveFlag recursive, unsigned int clientVersion,
Source & from, BufferedSink & to, unsigned int op)
@ -256,11 +275,11 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
case wopQueryValidPaths: {
auto paths = readStorePaths<StorePathSet>(*store, from);
auto paths = worker_proto::read(*store, from, Phantom<StorePathSet> {});
logger->startWork();
auto res = store->queryValidPaths(paths);
logger->stopWork();
writeStorePaths(*store, to, res);
worker_proto::write(*store, to, res);
break;
}
@ -276,11 +295,11 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
case wopQuerySubstitutablePaths: {
auto paths = readStorePaths<StorePathSet>(*store, from);
auto paths = worker_proto::read(*store, from, Phantom<StorePathSet> {});
logger->startWork();
auto res = store->querySubstitutablePaths(paths);
logger->stopWork();
writeStorePaths(*store, to, res);
worker_proto::write(*store, to, res);
break;
}
@ -309,7 +328,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
paths = store->queryValidDerivers(path);
else paths = store->queryDerivationOutputs(path);
logger->stopWork();
writeStorePaths(*store, to, paths);
worker_proto::write(*store, to, paths);
break;
}
@ -350,6 +369,41 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
case wopAddToStore: {
if (GET_PROTOCOL_MINOR(clientVersion) >= 25) {
auto name = readString(from);
auto camStr = readString(from);
auto refs = worker_proto::read(*store, from, Phantom<StorePathSet> {});
bool repairBool;
from >> repairBool;
auto repair = RepairFlag{repairBool};
logger->startWork();
auto pathInfo = [&]() {
// NB: FramedSource must be out of scope before logger->stopWork();
ContentAddressMethod contentAddressMethod = parseContentAddressMethod(camStr);
FramedSource source(from);
// TODO this is essentially RemoteStore::addCAToStore. Move it up to Store.
return std::visit(overloaded {
[&](TextHashMethod &_) {
// We could stream this by changing Store
std::string contents = source.drain();
auto path = store->addTextToStore(name, contents, refs, repair);
return store->queryPathInfo(path);
},
[&](FixedOutputHashMethod &fohm) {
if (!refs.empty())
throw UnimplementedError("cannot yet have refs with flat or nar-hashed data");
auto path = store->addToStoreFromDump(source, name, fohm.fileIngestionMethod, fohm.hashType, repair);
return store->queryPathInfo(path);
},
}, contentAddressMethod);
}();
logger->stopWork();
to << store->printStorePath(pathInfo->path);
writeValidPathInfo(store, clientVersion, to, pathInfo);
} else {
HashType hashAlgo;
std::string baseName;
FileIngestionMethod method;
@ -391,13 +445,14 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
logger->stopWork();
to << store->printStorePath(path);
}
break;
}
case wopAddTextToStore: {
string suffix = readString(from);
string s = readString(from);
auto refs = readStorePaths<StorePathSet>(*store, from);
auto refs = worker_proto::read(*store, from, Phantom<StorePathSet> {});
logger->startWork();
auto path = store->addTextToStore(suffix, s, refs, NoRepair);
logger->stopWork();
@ -494,6 +549,20 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
are in fact content-addressed if we don't trust them. */
assert(derivationIsCA(drv.type()) || trusted);
/* Recompute the derivation path when we cannot trust the original. */
if (!trusted) {
/* Recomputing the derivation path for input-addressed derivations
makes it harder to audit them after the fact, since we need the
original not-necessarily-resolved derivation to verify the drv
derivation as an adequate claim to the input-addressed output
paths. */
assert(derivationIsCA(drv.type()));
Derivation drv2;
static_cast<BasicDerivation &>(drv2) = drv;
drvPath = writeDerivation(*store, Derivation { drv2 });
}
auto res = store->buildDerivation(drvPath, drv, buildMode);
logger->stopWork();
to << res.status << res.errorMsg;
@ -556,7 +625,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
case wopCollectGarbage: {
GCOptions options;
options.action = (GCOptions::GCAction) readInt(from);
options.pathsToDelete = readStorePaths<StorePathSet>(*store, from);
options.pathsToDelete = worker_proto::read(*store, from, Phantom<StorePathSet> {});
from >> options.ignoreLiveness >> options.maxFreed;
// obsolete fields
readInt(from);
@ -625,7 +694,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
else {
to << 1
<< (i->second.deriver ? store->printStorePath(*i->second.deriver) : "");
writeStorePaths(*store, to, i->second.references);
worker_proto::write(*store, to, i->second.references);
to << i->second.downloadSize
<< i->second.narSize;
}
@ -636,11 +705,11 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
SubstitutablePathInfos infos;
StorePathCAMap pathsMap = {};
if (GET_PROTOCOL_MINOR(clientVersion) < 22) {
auto paths = readStorePaths<StorePathSet>(*store, from);
auto paths = worker_proto::read(*store, from, Phantom<StorePathSet> {});
for (auto & path : paths)
pathsMap.emplace(path, std::nullopt);
} else
pathsMap = readStorePathCAMap(*store, from);
pathsMap = worker_proto::read(*store, from, Phantom<StorePathCAMap> {});
logger->startWork();
store->querySubstitutablePathInfos(pathsMap, infos);
logger->stopWork();
@ -648,7 +717,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
for (auto & i : infos) {
to << store->printStorePath(i.first)
<< (i.second.deriver ? store->printStorePath(*i.second.deriver) : "");
writeStorePaths(*store, to, i.second.references);
worker_proto::write(*store, to, i.second.references);
to << i.second.downloadSize << i.second.narSize;
}
break;
@ -658,7 +727,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
logger->startWork();
auto paths = store->queryAllValidPaths();
logger->stopWork();
writeStorePaths(*store, to, paths);
worker_proto::write(*store, to, paths);
break;
}
@ -675,15 +744,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
if (info) {
if (GET_PROTOCOL_MINOR(clientVersion) >= 17)
to << 1;
to << (info->deriver ? store->printStorePath(*info->deriver) : "")
<< info->narHash.to_string(Base16, false);
writeStorePaths(*store, to, info->references);
to << info->registrationTime << info->narSize;
if (GET_PROTOCOL_MINOR(clientVersion) >= 16) {
to << info->ultimate
<< info->sigs
<< renderContentAddress(info->ca);
}
writeValidPathInfo(store, clientVersion, to, info);
} else {
assert(GET_PROTOCOL_MINOR(clientVersion) >= 17);
to << 0;
@ -738,7 +799,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
ValidPathInfo info { path, narHash };
if (deriver != "")
info.deriver = store->parseStorePath(deriver);
info.references = readStorePaths<StorePathSet>(*store, from);
info.references = worker_proto::read(*store, from, Phantom<StorePathSet> {});
from >> info.registrationTime >> info.narSize >> info.ultimate;
info.sigs = readStrings<StringSet>(from);
info.ca = parseContentAddressOpt(readString(from));
@ -749,59 +810,12 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
info.ultimate = false;
if (GET_PROTOCOL_MINOR(clientVersion) >= 23) {
struct FramedSource : Source
{
Source & from;
bool eof = false;
std::vector<unsigned char> pending;
size_t pos = 0;
FramedSource(Source & from) : from(from)
{ }
~FramedSource()
{
if (!eof) {
while (true) {
auto n = readInt(from);
if (!n) break;
std::vector<unsigned char> data(n);
from(data.data(), n);
}
}
}
size_t read(unsigned char * data, size_t len) override
{
if (eof) throw EndOfFile("reached end of FramedSource");
if (pos >= pending.size()) {
size_t len = readInt(from);
if (!len) {
eof = true;
return 0;
}
pending = std::vector<unsigned char>(len);
pos = 0;
from(pending.data(), len);
}
auto n = std::min(len, pending.size() - pos);
memcpy(data, pending.data() + pos, n);
pos += n;
return n;
}
};
logger->startWork();
{
FramedSource source(from);
store->addToStore(info, source, (RepairFlag) repair,
dontCheckSigs ? NoCheckSigs : CheckSigs);
}
logger->stopWork();
}
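`FramedSource` (now shared rather than defined inline here) reads a stream sent as length-prefixed chunks terminated by an empty chunk, so the daemon can recover the protocol framing even if the operation aborts midway. A toy sketch of the framing idea follows; it uses readable text length headers instead of the daemon's binary integer encoding:

```cpp
#include <cstddef>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Toy framing: each chunk is a length header followed by that many bytes;
// a zero-length header terminates the stream.
static void writeFramed(std::ostream & out, const std::string & payload, std::size_t chunkSize = 4)
{
    for (std::size_t i = 0; i < payload.size(); i += chunkSize) {
        std::string chunk = payload.substr(i, chunkSize);
        out << chunk.size() << "\n" << chunk;
    }
    out << 0 << "\n";                       // end-of-stream marker
}

static std::string readFramed(std::istream & in)
{
    std::string result;
    while (true) {
        std::size_t len; in >> len; in.get();   // read length, skip the newline
        if (len == 0) break;                    // zero length ends the frame stream
        std::vector<char> buf(len);
        in.read(buf.data(), len);
        result.append(buf.data(), len);
    }
    return result;
}

int main()
{
    std::stringstream wire;
    writeFramed(wire, "nar contents go here");
    std::cout << readFramed(wire) << "\n";
}
```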
@ -838,9 +852,9 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
uint64_t downloadSize, narSize;
store->queryMissing(targets, willBuild, willSubstitute, unknown, downloadSize, narSize);
logger->stopWork();
writeStorePaths(*store, to, willBuild);
writeStorePaths(*store, to, willSubstitute);
writeStorePaths(*store, to, unknown);
worker_proto::write(*store, to, willBuild);
worker_proto::write(*store, to, willSubstitute);
worker_proto::write(*store, to, unknown);
to << downloadSize << narSize;
break;
}
@ -926,10 +940,11 @@ void processConnection(
during addTextToStore() / importPath(). If that
happens, just send the error message and exit. */
bool errorAllowed = tunnelLogger->state_.lock()->canSendStderr;
tunnelLogger->stopWork(false, e.msg(), e.status);
tunnelLogger->stopWork(&e);
if (!errorAllowed) throw;
} catch (std::bad_alloc & e) {
tunnelLogger->stopWork(false, "Nix daemon out of memory", 1);
auto ex = Error("Nix daemon out of memory");
tunnelLogger->stopWork(&ex);
throw;
}
@ -938,8 +953,13 @@ void processConnection(
assert(!tunnelLogger->state_.lock()->canSendStderr);
};
} catch (Error & e) {
tunnelLogger->stopWork(&e);
to.flush();
return;
} catch (std::exception & e) {
tunnelLogger->stopWork(false, e.what(), 1);
auto ex = Error(e.what());
tunnelLogger->stopWork(&ex);
to.flush();
return;
}

View file

@ -1,3 +1,5 @@
#pragma once
#include "serialise.hh"
#include "store-api.hh"

View file

@ -69,7 +69,7 @@ bool BasicDerivation::isBuiltin() const
StorePath writeDerivation(Store & store,
const Derivation & drv, RepairFlag repair)
const Derivation & drv, RepairFlag repair, bool readOnly)
{
auto references = drv.inputSrcs;
for (auto & i : drv.inputDrvs)
@ -79,7 +79,7 @@ StorePath writeDerivation(Store & store,
held during a garbage collection). */
auto suffix = std::string(drv.name) + drvExtension;
auto contents = drv.unparse(store, false);
return settings.readOnlyMode
return readOnly || settings.readOnlyMode
? store.computeStorePathForText(suffix, contents, references)
: store.addTextToStore(suffix, contents, references, repair);
}
@ -584,7 +584,7 @@ Source & readDerivation(Source & in, const Store & store, BasicDerivation & drv,
drv.outputs.emplace(std::move(name), std::move(output));
}
drv.inputSrcs = readStorePaths<StorePathSet>(store, in);
drv.inputSrcs = worker_proto::read(store, in, Phantom<StorePathSet> {});
in >> drv.platform >> drv.builder;
drv.args = readStrings<Strings>(in);
@ -622,7 +622,7 @@ void writeDerivation(Sink & out, const Store & store, const BasicDerivation & dr
},
}, i.second.output);
}
writeStorePaths(store, out, drv.inputSrcs);
worker_proto::write(store, out, drv.inputSrcs);
out << drv.platform << drv.builder << drv.args;
out << drv.env.size();
for (auto & i : drv.env)
@ -644,4 +644,57 @@ std::string downstreamPlaceholder(const Store & store, const StorePath & drvPath
return "/" + hashString(htSHA256, clearText).to_string(Base32, false);
}
// N.B. Outputs are left unchanged
static void rewriteDerivation(Store & store, BasicDerivation & drv, const StringMap & rewrites) {
debug("Rewriting the derivation");
for (auto &rewrite: rewrites) {
debug("rewriting %s as %s", rewrite.first, rewrite.second);
}
drv.builder = rewriteStrings(drv.builder, rewrites);
for (auto & arg: drv.args) {
arg = rewriteStrings(arg, rewrites);
}
StringPairs newEnv;
for (auto & envVar: drv.env) {
auto envName = rewriteStrings(envVar.first, rewrites);
auto envValue = rewriteStrings(envVar.second, rewrites);
newEnv.emplace(envName, envValue);
}
drv.env = newEnv;
}
Sync<DrvPathResolutions> drvPathResolutions;
std::optional<BasicDerivation> Derivation::tryResolve(Store & store) {
BasicDerivation resolved { *this };
// Input paths that we'll want to rewrite in the derivation
StringMap inputRewrites;
for (auto & input : inputDrvs) {
auto inputDrvOutputs = store.queryPartialDerivationOutputMap(input.first);
StringSet newOutputNames;
for (auto & outputName : input.second) {
auto actualPathOpt = inputDrvOutputs.at(outputName);
if (!actualPathOpt)
return std::nullopt;
auto actualPath = *actualPathOpt;
inputRewrites.emplace(
downstreamPlaceholder(store, input.first, outputName),
store.printStorePath(actualPath));
resolved.inputSrcs.insert(std::move(actualPath));
}
}
rewriteDerivation(store, resolved, inputRewrites);
return resolved;
}
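`tryResolve` only works because every reference to a not-yet-built input output appears in the derivation as a placeholder string, so resolution reduces to text substitution once the real output paths are known. A stand-alone sketch with a hypothetical placeholder and store path (the real keys come from `downstreamPlaceholder`):

```cpp
#include <iostream>
#include <map>
#include <string>

// Plain string substitution, standing in for rewriteStrings(): every
// occurrence of a placeholder key is replaced by its realised store path.
static std::string rewrite(std::string s, const std::map<std::string, std::string> & rules)
{
    for (auto & [from, to] : rules) {
        for (std::size_t pos = 0; (pos = s.find(from, pos)) != std::string::npos; pos += to.size())
            s.replace(pos, from.size(), to);
    }
    return s;
}

int main()
{
    // Hypothetical placeholder and output path, only to show the shape of
    // the rewrite; real placeholders are hashes computed by downstreamPlaceholder().
    std::map<std::string, std::string> inputRewrites = {
        {"/1rz4g4znpzjwh1xymhjpm42vipw92pr73vdgl6xs1hycac8kf2n9",
         "/nix/store/abcd1234-dependency-1.0"},
    };

    std::string builderArg = "--with-dep=/1rz4g4znpzjwh1xymhjpm42vipw92pr73vdgl6xs1hycac8kf2n9/bin";
    std::cout << rewrite(builderArg, inputRewrites) << "\n";
    // -> --with-dep=/nix/store/abcd1234-dependency-1.0/bin
}
```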
}

View file

@ -4,6 +4,7 @@
#include "types.hh"
#include "hash.hh"
#include "content-address.hh"
#include "sync.hh"
#include <map>
#include <variant>
@ -60,8 +61,6 @@ typedef std::map<string, DerivationOutput> DerivationOutputs;
also contains, for each output, the (optional) store path in which it would
be written. To calculate values of these types, see the corresponding
functions in BasicDerivation */
typedef std::map<string, std::pair<DerivationOutput, StorePath>>
DerivationOutputsAndPaths;
typedef std::map<string, std::pair<DerivationOutput, std::optional<StorePath>>>
DerivationOutputsAndOptPaths;
@ -100,7 +99,7 @@ struct BasicDerivation
StringPairs env;
std::string name;
BasicDerivation() { }
BasicDerivation() = default;
virtual ~BasicDerivation() { };
bool isBuiltin() const;
@ -127,7 +126,17 @@ struct Derivation : BasicDerivation
std::string unparse(const Store & store, bool maskOutputs,
std::map<std::string, StringSet> * actualInputs = nullptr) const;
Derivation() { }
/* Return the underlying basic derivation but with these changes:
1. Input drvs are emptied, but the outputs of them that were used are
added directly to input sources.
2. Input placeholders are replaced with realized input store paths. */
std::optional<BasicDerivation> tryResolve(Store & store);
Derivation() = default;
Derivation(const BasicDerivation & bd) : BasicDerivation(bd) { }
Derivation(BasicDerivation && bd) : BasicDerivation(std::move(bd)) { }
};
@ -137,7 +146,9 @@ enum RepairFlag : bool { NoRepair = false, Repair = true };
/* Write a derivation to the Nix store, and return its path. */
StorePath writeDerivation(Store & store,
const Derivation & drv, RepairFlag repair = NoRepair);
const Derivation & drv,
RepairFlag repair = NoRepair,
bool readOnly = false);
/* Read a derivation from a file. */
Derivation parseDerivation(const Store & store, std::string && s, std::string_view name);
@ -191,6 +202,16 @@ typedef std::map<StorePath, DrvHashModulo> DrvHashes;
extern DrvHashes drvHashes; // FIXME: global, not thread-safe
/* Memoisation of `readDerivation(..).tryResolve()`. */
typedef std::map<
StorePath,
std::optional<StorePath>
> DrvPathResolutions;
// FIXME: global, though at least thread-safe.
// FIXME: arguably overlaps with hashDerivationModulo memo table.
extern Sync<DrvPathResolutions> drvPathResolutions;
bool wantOutput(const string & output, const std::set<string> & wanted);
struct Source;

View file

@ -18,8 +18,7 @@ struct DummyStore : public Store, public virtual DummyStoreConfig
DummyStore(const Params & params)
: StoreConfig(params)
, Store(params)
{
}
{ }
string getUri() override
{
@ -63,6 +62,6 @@ struct DummyStore : public Store, public virtual DummyStoreConfig
{ unsupported("buildDerivation"); }
};
static RegisterStoreImplementation<DummyStore, DummyStoreConfig> regStore;
static RegisterStoreImplementation<DummyStore, DummyStoreConfig> regDummyStore;
}

View file

@ -45,7 +45,7 @@ void Store::exportPath(const StorePath & path, Sink & sink)
teeSink
<< exportMagic
<< printStorePath(path);
writeStorePaths(*this, teeSink, info->references);
worker_proto::write(*this, teeSink, info->references);
teeSink
<< (info->deriver ? printStorePath(*info->deriver) : "")
<< 0;
@ -73,7 +73,7 @@ StorePaths Store::importPaths(Source & source, CheckSigsFlag checkSigs)
//Activity act(*logger, lvlInfo, format("importing path '%s'") % info.path);
auto references = readStorePaths<StorePathSet>(*this, source);
auto references = worker_proto::read(*this, source, Phantom<StorePathSet> {});
auto deriver = readString(source);
auto narHash = hashString(htSHA256, *saved.s);

View file

@ -31,7 +31,7 @@ namespace nix {
FileTransferSettings fileTransferSettings;
static GlobalConfig::Register r1(&fileTransferSettings);
static GlobalConfig::Register rFileTransferSettings(&fileTransferSettings);
std::string resolveUri(const std::string & uri)
{
@ -113,6 +113,9 @@ struct curlFileTransfer : public FileTransfer
requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + request.expectedETag).c_str());
if (!request.mimeType.empty())
requestHeaders = curl_slist_append(requestHeaders, ("Content-Type: " + request.mimeType).c_str());
for (auto it = request.headers.begin(); it != request.headers.end(); ++it){
requestHeaders = curl_slist_append(requestHeaders, fmt("%s: %s", it->first, it->second).c_str());
}
}
~TransferItem()

View file

@ -51,6 +51,7 @@ extern FileTransferSettings fileTransferSettings;
struct FileTransferRequest
{
std::string uri;
Headers headers;
std::string expectedETag;
bool verifyTLS = true;
bool head = false;

View file

@ -1,6 +1,7 @@
#include "derivations.hh"
#include "globals.hh"
#include "local-store.hh"
#include "local-fs-store.hh"
#include "finally.hh"
#include <functional>
@ -663,9 +664,7 @@ void LocalStore::removeUnusedLinks(const GCState & state)
if (name == "." || name == "..") continue;
Path path = linksDir + "/" + name;
struct stat st;
if (lstat(path.c_str(), &st) == -1)
throw SysError("statting '%1%'", path);
auto st = lstat(path);
if (st.st_nlink != 1) {
actualSize += st.st_size;

View file

@ -25,7 +25,7 @@ namespace nix {
Settings settings;
static GlobalConfig::Register r1(&settings);
static GlobalConfig::Register rSettings(&settings);
Settings::Settings()
: nixPrefix(NIX_PREFIX)
@ -42,6 +42,7 @@ Settings::Settings()
{
buildUsersGroup = getuid() == 0 ? "nixbld" : "";
lockCPU = getEnv("NIX_AFFINITY_HACK") == "1";
allowSymlinkedStore = getEnv("NIX_IGNORE_SYMLINK_STORE") == "1";
caFile = getEnv("NIX_SSL_CERT_FILE").value_or(getEnv("SSL_CERT_FILE").value_or(""));
if (caFile == "") {
@ -147,6 +148,12 @@ bool Settings::isWSL1()
const string nixVersion = PACKAGE_VERSION;
NLOHMANN_JSON_SERIALIZE_ENUM(SandboxMode, {
{SandboxMode::smEnabled, true},
{SandboxMode::smRelaxed, "relaxed"},
{SandboxMode::smDisabled, false},
});
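The macro above gives `SandboxMode` a JSON round-trip in which `smEnabled` and `smDisabled` map to booleans and `smRelaxed` to the string `"relaxed"`. A minimal usage sketch, assuming only nlohmann/json and a standalone copy of the enum:

```cpp
#include <iostream>
#include <nlohmann/json.hpp>

enum class SandboxMode { smEnabled, smRelaxed, smDisabled };

// Same mapping as in globals.cc: booleans for on/off, a string for relaxed.
NLOHMANN_JSON_SERIALIZE_ENUM(SandboxMode, {
    {SandboxMode::smEnabled, true},
    {SandboxMode::smRelaxed, "relaxed"},
    {SandboxMode::smDisabled, false},
});

int main()
{
    nlohmann::json j = SandboxMode::smRelaxed;
    std::cout << j << "\n";                       // prints "relaxed"
    auto m = j.get<SandboxMode>();                // round-trips back to the enum
    std::cout << (m == SandboxMode::smRelaxed) << "\n";
}
```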
template<> void BaseSetting<SandboxMode>::set(const std::string & str)
{
if (str == "true") value = smEnabled;

View file

@ -859,8 +859,54 @@ public:
are loaded as plugins (non-recursively).
)"};
Setting<std::string> githubAccessToken{this, "", "github-access-token",
"GitHub access token to get access to GitHub data through the GitHub API for `github:<..>` flakes."};
Setting<StringMap> accessTokens{this, {}, "access-tokens",
R"(
Access tokens used to access protected GitHub, GitLab, or
other locations requiring token-based authentication.
Access tokens are specified as a string made up of
space-separated `host=token` values. The specific token
used is selected by matching the `host` portion against the
"host" specification of the input. The actual use of the
`token` value is determined by the type of resource being
accessed:
* GitHub: the token value is the OAUTH-TOKEN string obtained
as the Personal Access Token from the GitHub server (see
https://docs.github.com/en/developers/apps/authorizing-oath-apps).
* GitLab: the token value is either the OAuth2 token or the
Personal Access Token (these are different types of tokens
for GitLab, see
https://docs.gitlab.com/12.10/ee/api/README.html#authentication).
The `token` value should be `type:tokenstring` where
`type` is either `OAuth2` or `PAT` to indicate which type
of token is being specified.
Example `~/.config/nix/nix.conf`:
```
access-tokens = "github.com=23ac...b289 gitlab.mycompany.com=PAT:A123Bp_Cd..EfG gitlab.com=OAuth2:1jklw3jk"
```
Example `~/code/flake.nix`:
```nix
input.foo = {
type = "gitlab";
host = "gitlab.mycompany.com";
owner = "mycompany";
repo = "pro";
};
```
This example specifies three tokens, one each for accessing
github.com, gitlab.mycompany.com, and gitlab.com.
The `input.foo` uses the "gitlab" fetcher, which might
require specifying the token type along with the token
value.
)"};
Setting<Strings> experimentalFeatures{this, {}, "experimental-features",
"Experimental Nix features to enable."};
@ -880,6 +926,19 @@ public:
Setting<std::string> flakeRegistry{this, "https://github.com/NixOS/flake-registry/raw/master/flake-registry.json", "flake-registry",
"Path or URI of the global flake registry."};
Setting<bool> allowSymlinkedStore{
this, false, "allow-symlinked-store",
R"(
If set to `true`, Nix will stop complaining if the store directory
(typically /nix/store) contains symlink components.
This risks making some builds "impure" because builders sometimes
"canonicalise" paths by resolving all symlink components. Problems
occur if those builds are then deployed to machines where /nix/store
resolves to a different location from that of the build machine. You
can enable this setting if you are sure you're not going to do that.
)"};
};

View file

@ -73,6 +73,7 @@ public:
if (forceHttp) ret.insert("file");
return ret;
}
protected:
void maybeDisable()
@ -180,6 +181,6 @@ protected:
};
static RegisterStoreImplementation<HttpBinaryCacheStore, HttpBinaryCacheStoreConfig> regStore;
static RegisterStoreImplementation<HttpBinaryCacheStore, HttpBinaryCacheStoreConfig> regHttpBinaryCacheStore;
}

View file

@ -122,7 +122,7 @@ struct LegacySSHStore : public Store, public virtual LegacySSHStoreConfig
auto deriver = readString(conn->from);
if (deriver != "")
info->deriver = parseStorePath(deriver);
info->references = readStorePaths<StorePathSet>(*this, conn->from);
info->references = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
readLongLong(conn->from); // download size
info->narSize = readLongLong(conn->from);
@ -156,7 +156,7 @@ struct LegacySSHStore : public Store, public virtual LegacySSHStoreConfig
<< printStorePath(info.path)
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< info.narHash.to_string(Base16, false);
writeStorePaths(*this, conn->to, info.references);
worker_proto::write(*this, conn->to, info.references);
conn->to
<< info.registrationTime
<< info.narSize
@ -185,7 +185,7 @@ struct LegacySSHStore : public Store, public virtual LegacySSHStoreConfig
conn->to
<< exportMagic
<< printStorePath(info.path);
writeStorePaths(*this, conn->to, info.references);
worker_proto::write(*this, conn->to, info.references);
conn->to
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< 0
@ -301,10 +301,10 @@ public:
conn->to
<< cmdQueryClosure
<< includeOutputs;
writeStorePaths(*this, conn->to, paths);
worker_proto::write(*this, conn->to, paths);
conn->to.flush();
for (auto & i : readStorePaths<StorePathSet>(*this, conn->from))
for (auto & i : worker_proto::read(*this, conn->from, Phantom<StorePathSet> {}))
out.insert(i);
}
@ -317,10 +317,10 @@ public:
<< cmdQueryValidPaths
<< false // lock
<< maybeSubstitute;
writeStorePaths(*this, conn->to, paths);
worker_proto::write(*this, conn->to, paths);
conn->to.flush();
return readStorePaths<StorePathSet>(*this, conn->from);
return worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
}
void connect() override
@ -335,6 +335,6 @@ public:
}
};
static RegisterStoreImplementation<LegacySSHStore, LegacySSHStoreConfig> regStore;
static RegisterStoreImplementation<LegacySSHStore, LegacySSHStoreConfig> regLegacySSHStore;
}

View file

@ -105,6 +105,6 @@ std::set<std::string> LocalBinaryCacheStore::uriSchemes()
return {"file"};
}
static RegisterStoreImplementation<LocalBinaryCacheStore, LocalBinaryCacheStoreConfig> regStore;
static RegisterStoreImplementation<LocalBinaryCacheStore, LocalBinaryCacheStoreConfig> regLocalBinaryCacheStore;
}

View file

@ -1,6 +1,7 @@
#include "archive.hh"
#include "fs-accessor.hh"
#include "store-api.hh"
#include "local-fs-store.hh"
#include "globals.hh"
#include "compression.hh"
#include "derivations.hh"

View file

@ -0,0 +1,48 @@
#pragma once
#include "store-api.hh"
namespace nix {
struct LocalFSStoreConfig : virtual StoreConfig
{
using StoreConfig::StoreConfig;
// FIXME: the (StoreConfig*) cast works around a bug in gcc that causes
// it to omit the call to the Setting constructor. Clang works fine
// either way.
const PathSetting rootDir{(StoreConfig*) this, true, "",
"root", "directory prefixed to all other paths"};
const PathSetting stateDir{(StoreConfig*) this, false,
rootDir != "" ? rootDir + "/nix/var/nix" : settings.nixStateDir,
"state", "directory where Nix will store state"};
const PathSetting logDir{(StoreConfig*) this, false,
rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir,
"log", "directory where Nix will store state"};
};
class LocalFSStore : public virtual Store, public virtual LocalFSStoreConfig
{
public:
const static string drvsLogDir;
LocalFSStore(const Params & params);
void narFromPath(const StorePath & path, Sink & sink) override;
ref<FSAccessor> getFSAccessor() override;
/* Register a permanent GC root. */
Path addPermRoot(const StorePath & storePath, const Path & gcRoot);
virtual Path getRealStoreDir() { return storeDir; }
Path toRealPath(const Path & storePath) override
{
assert(isInStore(storePath));
return getRealStoreDir() + "/" + std::string(storePath, storeDir.size() + 1);
}
std::shared_ptr<std::string> getBuildLog(const StorePath & path) override;
};
}

View file

@ -110,12 +110,11 @@ LocalStore::LocalStore(const Params & params)
}
/* Ensure that the store and its parents are not symlinks. */
if (getEnv("NIX_IGNORE_SYMLINK_STORE") != "1") {
if (!settings.allowSymlinkedStore) {
Path path = realStoreDir;
struct stat st;
while (path != "/") {
if (lstat(path.c_str(), &st))
throw SysError("getting status of '%1%'", path);
st = lstat(path);
if (S_ISLNK(st.st_mode))
throw Error(
"the path '%1%' is a symlink; "
@ -419,10 +418,7 @@ static void canonicaliseTimestampAndPermissions(const Path & path, const struct
void canonicaliseTimestampAndPermissions(const Path & path)
{
struct stat st;
if (lstat(path.c_str(), &st))
throw SysError("getting attributes of path '%1%'", path);
canonicaliseTimestampAndPermissions(path, st);
canonicaliseTimestampAndPermissions(path, lstat(path));
}
@ -440,9 +436,7 @@ static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSe
}
#endif
struct stat st;
if (lstat(path.c_str(), &st))
throw SysError("getting attributes of path '%1%'", path);
auto st = lstat(path);
/* Really make sure that the path is of a supported type. */
if (!(S_ISREG(st.st_mode) || S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode)))
@ -478,8 +472,7 @@ static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSe
ensure that we don't fail on hard links within the same build
(i.e. "touch $out/foo; ln $out/foo $out/bar"). */
if (fromUid != (uid_t) -1 && st.st_uid != fromUid) {
assert(!S_ISDIR(st.st_mode));
if (inodesSeen.find(Inode(st.st_dev, st.st_ino)) == inodesSeen.end())
if (S_ISDIR(st.st_mode) || !inodesSeen.count(Inode(st.st_dev, st.st_ino)))
throw BuildError("invalid ownership on file '%1%'", path);
mode_t mode = st.st_mode & ~S_IFMT;
assert(S_ISLNK(st.st_mode) || (st.st_uid == geteuid() && (mode == 0444 || mode == 0555) && st.st_mtime == mtimeStore));
@ -522,9 +515,7 @@ void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & ino
/* On platforms that don't have lchown(), the top-level path can't
be a symlink, since we can't change its ownership. */
struct stat st;
if (lstat(path.c_str(), &st))
throw SysError("getting attributes of path '%1%'", path);
auto st = lstat(path);
if (st.st_uid != geteuid()) {
assert(S_ISLNK(st.st_mode));
@ -730,7 +721,7 @@ uint64_t LocalStore::queryValidPathId(State & state, const StorePath & path)
{
auto use(state.stmtQueryPathInfo.use()(printStorePath(path)));
if (!use.next())
throw Error("path '%s' is not valid", printStorePath(path));
throw InvalidPath("path '%s' is not valid", printStorePath(path));
return use.getInt(0);
}
@ -805,18 +796,58 @@ StorePathSet LocalStore::queryValidDerivers(const StorePath & path)
}
std::map<std::string, std::optional<StorePath>> LocalStore::queryPartialDerivationOutputMap(const StorePath & path)
std::map<std::string, std::optional<StorePath>> LocalStore::queryPartialDerivationOutputMap(const StorePath & path_)
{
auto path = path_;
std::map<std::string, std::optional<StorePath>> outputs;
BasicDerivation drv = readDerivation(path);
Derivation drv = readDerivation(path);
for (auto & [outName, _] : drv.outputs) {
outputs.insert_or_assign(outName, std::nullopt);
}
bool haveCached = false;
{
auto resolutions = drvPathResolutions.lock();
auto resolvedPathOptIter = resolutions->find(path);
if (resolvedPathOptIter != resolutions->end()) {
auto & [_, resolvedPathOpt] = *resolvedPathOptIter;
if (resolvedPathOpt)
path = *resolvedPathOpt;
haveCached = true;
}
}
/* Can't just use else-if instead of `!haveCached` because we need to unlock
`drvPathResolutions` before it is locked in `Derivation::tryResolve`. */
if (!haveCached && drv.type() == DerivationType::CAFloating) {
/* Try resolve drv and use that path instead. */
auto attempt = drv.tryResolve(*this);
if (!attempt)
/* If we cannot resolve the derivation, we cannot have any path
assigned so we return the map of all std::nullopts. */
return outputs;
/* Just compute store path */
auto pathResolved = writeDerivation(*this, *std::move(attempt), NoRepair, true);
/* Store in memo table. */
/* FIXME: memo logic should not be local-store specific, should have
wrapper-method instead. */
drvPathResolutions.lock()->insert_or_assign(path, pathResolved);
path = std::move(pathResolved);
}
return retrySQLite<std::map<std::string, std::optional<StorePath>>>([&]() {
auto state(_state.lock());
auto useQueryDerivationOutputs(state->stmtQueryDerivationOutputs.use()
(queryValidPathId(*state, path)));
uint64_t drvId;
try {
drvId = queryValidPathId(*state, path);
} catch (InvalidPath &) {
/* FIXME? if the derivation doesn't exist, we cannot have a mapping
for it. */
return outputs;
}
auto useQueryDerivationOutputs {
state->stmtQueryDerivationOutputs.use()
(drvId)
};
while (useQueryDerivationOutputs.next())
outputs.insert_or_assign(
@ -1455,7 +1486,7 @@ static void makeMutable(const Path & path)
{
checkInterrupt();
struct stat st = lstat(path);
auto st = lstat(path);
if (!S_ISDIR(st.st_mode) && !S_ISREG(st.st_mode)) return;

View file

@ -4,6 +4,7 @@
#include "pathlocks.hh"
#include "store-api.hh"
#include "local-fs-store.hh"
#include "sync.hh"
#include "util.hh"

View file

@ -17,9 +17,7 @@ namespace nix {
static void makeWritable(const Path & path)
{
struct stat st;
if (lstat(path.c_str(), &st))
throw SysError("getting attributes of path '%1%'", path);
auto st = lstat(path);
if (chmod(path.c_str(), st.st_mode | S_IWUSR) == -1)
throw SysError("changing writability of '%1%'", path);
}
@ -94,9 +92,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
{
checkInterrupt();
struct stat st;
if (lstat(path.c_str(), &st))
throw SysError("getting attributes of path '%1%'", path);
auto st = lstat(path);
#if __APPLE__
/* HFS/macOS has some undocumented security feature disabling hardlinking for
@ -187,9 +183,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
/* Yes! We've seen a file with the same contents. Replace the
current file with a hard link to that file. */
struct stat stLink;
if (lstat(linkPath.c_str(), &stLink))
throw SysError("getting attributes of path '%1%'", linkPath);
auto stLink = lstat(linkPath);
if (st.st_ino == stLink.st_ino) {
debug(format("'%1%' is already linked to '%2%'") % path % linkPath);
@ -282,21 +276,15 @@ void LocalStore::optimiseStore(OptimiseStats & stats)
}
}
static string showBytes(uint64_t bytes)
{
return (format("%.2f MiB") % (bytes / (1024.0 * 1024.0))).str();
}
void LocalStore::optimiseStore()
{
OptimiseStats stats;
optimiseStore(stats);
printInfo(
format("%1% freed by hard-linking %2% files")
% showBytes(stats.bytesFreed)
% stats.filesLinked);
printInfo("%s freed by hard-linking %d files",
showBytes(stats.bytesFreed),
stats.filesLinked);
}
void LocalStore::optimisePath(const Path & path)

View file

@ -1,3 +1,5 @@
#pragma once
#include "store-api.hh"
#include <nlohmann/json_fwd.hpp>

View file

@ -1,5 +1,6 @@
#include "profiles.hh"
#include "store-api.hh"
#include "local-fs-store.hh"
#include "util.hh"
#include <sys/types.h>
@ -39,13 +40,10 @@ std::pair<Generations, std::optional<GenerationNumber>> findGenerations(Path pro
for (auto & i : readDirectory(profileDir)) {
if (auto n = parseName(profileName, i.name)) {
auto path = profileDir + "/" + i.name;
struct stat st;
if (lstat(path.c_str(), &st) != 0)
throw SysError("statting '%1%'", path);
gens.push_back({
.number = *n,
.path = path,
.creationTime = st.st_mtime
.creationTime = lstat(path).st_mtime
});
}
}

View file

@ -12,59 +12,21 @@
#include "logging.hh"
#include "callback.hh"
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstring>
namespace nix {
template<> StorePathSet readStorePaths(const Store & store, Source & from)
{
StorePathSet paths;
for (auto & i : readStrings<Strings>(from))
paths.insert(store.parseStorePath(i));
return paths;
}
void writeStorePaths(const Store & store, Sink & out, const StorePathSet & paths)
{
out << paths.size();
for (auto & i : paths)
out << store.printStorePath(i);
}
StorePathCAMap readStorePathCAMap(const Store & store, Source & from)
{
StorePathCAMap paths;
auto count = readNum<size_t>(from);
while (count--) {
auto path = store.parseStorePath(readString(from));
auto ca = parseContentAddressOpt(readString(from));
paths.insert_or_assign(path, ca);
}
return paths;
}
void writeStorePathCAMap(const Store & store, Sink & out, const StorePathCAMap & paths)
{
out << paths.size();
for (auto & i : paths) {
out << store.printStorePath(i.first);
out << renderContentAddress(i.second);
}
}
namespace worker_proto {
std::string read(const Store & store, Source & from, Phantom<std::string> _)
{
return readString(from);
}
void write(const Store & store, Sink & out, const std::string & str)
{
out << str;
}
StorePath read(const Store & store, Source & from, Phantom<StorePath> _)
{
return store.parseStorePath(readString(from));
@ -76,19 +38,39 @@ void write(const Store & store, Sink & out, const StorePath & storePath)
}
template<>
ContentAddress read(const Store & store, Source & from, Phantom<ContentAddress> _)
{
return parseContentAddress(readString(from));
}
void write(const Store & store, Sink & out, const ContentAddress & ca)
{
out << renderContentAddress(ca);
}
std::optional<StorePath> read(const Store & store, Source & from, Phantom<std::optional<StorePath>> _)
{
auto s = readString(from);
return s == "" ? std::optional<StorePath> {} : store.parseStorePath(s);
}
template<>
void write(const Store & store, Sink & out, const std::optional<StorePath> & storePathOpt)
{
out << (storePathOpt ? store.printStorePath(*storePathOpt) : "");
}
std::optional<ContentAddress> read(const Store & store, Source & from, Phantom<std::optional<ContentAddress>> _)
{
return parseContentAddressOpt(readString(from));
}
void write(const Store & store, Sink & out, const std::optional<ContentAddress> & caOpt)
{
out << (caOpt ? renderContentAddress(*caOpt) : "");
}
}
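The `Phantom` tag exists because these readers differ only in return type, which C++ overload resolution cannot dispatch on; passing an empty tag value picks the right overload. A simplified model of the pattern, without the `Store` and `Source` parameters of the real `worker_proto` functions:

```cpp
#include <cstddef>
#include <iostream>
#include <set>
#include <sstream>
#include <string>

// Empty tag type: lets the caller select which `read` overload to use,
// since C++ cannot overload on return type alone.
template<typename T> struct Phantom {};

static std::string read(std::istream & in, Phantom<std::string>)
{
    std::string s; in >> s; return s;
}

static std::set<std::string> read(std::istream & in, Phantom<std::set<std::string>>)
{
    std::size_t n; in >> n;                        // length-prefixed collection
    std::set<std::string> res;
    while (n--) res.insert(read(in, Phantom<std::string>{}));
    return res;
}

int main()
{
    std::istringstream wire("2 /nix/store/a-foo /nix/store/b-bar");
    auto paths = read(wire, Phantom<std::set<std::string>>{});   // tag selects the overload
    for (auto & p : paths) std::cout << p << "\n";
}
```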
@ -133,69 +115,6 @@ ref<RemoteStore::Connection> RemoteStore::openConnectionWrapper()
}
UDSRemoteStore::UDSRemoteStore(const Params & params)
: StoreConfig(params)
, Store(params)
, LocalFSStore(params)
, RemoteStore(params)
{
}
UDSRemoteStore::UDSRemoteStore(
const std::string scheme,
std::string socket_path,
const Params & params)
: UDSRemoteStore(params)
{
path.emplace(socket_path);
}
std::string UDSRemoteStore::getUri()
{
if (path) {
return std::string("unix://") + *path;
} else {
return "daemon";
}
}
ref<RemoteStore::Connection> UDSRemoteStore::openConnection()
{
auto conn = make_ref<Connection>();
/* Connect to a daemon that does the privileged work for us. */
conn->fd = socket(PF_UNIX, SOCK_STREAM
#ifdef SOCK_CLOEXEC
| SOCK_CLOEXEC
#endif
, 0);
if (!conn->fd)
throw SysError("cannot create Unix domain socket");
closeOnExec(conn->fd.get());
string socketPath = path ? *path : settings.nixDaemonSocketFile;
struct sockaddr_un addr;
addr.sun_family = AF_UNIX;
if (socketPath.size() + 1 >= sizeof(addr.sun_path))
throw Error("socket path '%1%' is too long", socketPath);
strcpy(addr.sun_path, socketPath.c_str());
if (::connect(conn->fd.get(), (struct sockaddr *) &addr, sizeof(addr)) == -1)
throw SysError("cannot connect to daemon at '%1%'", socketPath);
conn->from.fd = conn->fd.get();
conn->to.fd = conn->fd.get();
conn->startTime = std::chrono::steady_clock::now();
return conn;
}
void RemoteStore::initConnection(Connection & conn)
{
/* Send the magic greeting, check for the reply. */
@ -307,6 +226,8 @@ struct ConnectionHandle
std::rethrow_exception(ex);
}
}
void withFramedSink(std::function<void(Sink & sink)> fun);
};
@ -335,9 +256,9 @@ StorePathSet RemoteStore::queryValidPaths(const StorePathSet & paths, Substitute
return res;
} else {
conn->to << wopQueryValidPaths;
writeStorePaths(*this, conn->to, paths);
worker_proto::write(*this, conn->to, paths);
conn.processStderr();
return readStorePaths<StorePathSet>(*this, conn->from);
return worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
}
}
@ -347,7 +268,7 @@ StorePathSet RemoteStore::queryAllValidPaths()
auto conn(getConnection());
conn->to << wopQueryAllValidPaths;
conn.processStderr();
return readStorePaths<StorePathSet>(*this, conn->from);
return worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
}
@ -364,9 +285,9 @@ StorePathSet RemoteStore::querySubstitutablePaths(const StorePathSet & paths)
return res;
} else {
conn->to << wopQuerySubstitutablePaths;
writeStorePaths(*this, conn->to, paths);
worker_proto::write(*this, conn->to, paths);
conn.processStderr();
return readStorePaths<StorePathSet>(*this, conn->from);
return worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
}
}
@ -388,7 +309,7 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S
auto deriver = readString(conn->from);
if (deriver != "")
info.deriver = parseStorePath(deriver);
info.references = readStorePaths<StorePathSet>(*this, conn->from);
info.references = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
info.downloadSize = readLongLong(conn->from);
info.narSize = readLongLong(conn->from);
infos.insert_or_assign(i.first, std::move(info));
@ -401,9 +322,9 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S
StorePathSet paths;
for (auto & path : pathsMap)
paths.insert(path.first);
writeStorePaths(*this, conn->to, paths);
worker_proto::write(*this, conn->to, paths);
} else
writeStorePathCAMap(*this, conn->to, pathsMap);
worker_proto::write(*this, conn->to, pathsMap);
conn.processStderr();
size_t count = readNum<size_t>(conn->from);
for (size_t n = 0; n < count; n++) {
@ -411,7 +332,7 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S
auto deriver = readString(conn->from);
if (deriver != "")
info.deriver = parseStorePath(deriver);
info.references = readStorePaths<StorePathSet>(*this, conn->from);
info.references = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
info.downloadSize = readLongLong(conn->from);
info.narSize = readLongLong(conn->from);
}
@ -420,11 +341,28 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S
}
ref<const ValidPathInfo> RemoteStore::readValidPathInfo(ConnectionHandle & conn, const StorePath & path)
{
auto deriver = readString(conn->from);
auto narHash = Hash::parseAny(readString(conn->from), htSHA256);
auto info = make_ref<ValidPathInfo>(path, narHash);
if (deriver != "") info->deriver = parseStorePath(deriver);
info->references = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
conn->from >> info->registrationTime >> info->narSize;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) {
conn->from >> info->ultimate;
info->sigs = readStrings<StringSet>(conn->from);
info->ca = parseContentAddressOpt(readString(conn->from));
}
return info;
}
void RemoteStore::queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept
{
try {
std::shared_ptr<ValidPathInfo> info;
std::shared_ptr<const ValidPathInfo> info;
{
auto conn(getConnection());
conn->to << wopQueryPathInfo << printStorePath(path);
@ -440,17 +378,7 @@ void RemoteStore::queryPathInfoUncached(const StorePath & path,
bool valid; conn->from >> valid;
if (!valid) throw InvalidPath("path '%s' is not valid", printStorePath(path));
}
auto deriver = readString(conn->from);
auto narHash = Hash::parseAny(readString(conn->from), htSHA256);
info = std::make_shared<ValidPathInfo>(path, narHash);
if (deriver != "") info->deriver = parseStorePath(deriver);
info->references = readStorePaths<StorePathSet>(*this, conn->from);
conn->from >> info->registrationTime >> info->narSize;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) {
conn->from >> info->ultimate;
info->sigs = readStrings<StringSet>(conn->from);
info->ca = parseContentAddressOpt(readString(conn->from));
}
info = readValidPathInfo(conn, path);
}
callback(std::move(info));
} catch (...) { callback.rethrow(); }
@ -463,7 +391,7 @@ void RemoteStore::queryReferrers(const StorePath & path,
auto conn(getConnection());
conn->to << wopQueryReferrers << printStorePath(path);
conn.processStderr();
for (auto & i : readStorePaths<StorePathSet>(*this, conn->from))
for (auto & i : worker_proto::read(*this, conn->from, Phantom<StorePathSet> {}))
referrers.insert(i);
}
@ -473,7 +401,7 @@ StorePathSet RemoteStore::queryValidDerivers(const StorePath & path)
auto conn(getConnection());
conn->to << wopQueryValidDerivers << printStorePath(path);
conn.processStderr();
return readStorePaths<StorePathSet>(*this, conn->from);
return worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
}
@ -485,7 +413,7 @@ StorePathSet RemoteStore::queryDerivationOutputs(const StorePath & path)
}
conn->to << wopQueryDerivationOutputs << printStorePath(path);
conn.processStderr();
return readStorePaths<StorePathSet>(*this, conn->from);
return worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
}
@ -511,7 +439,6 @@ std::map<std::string, std::optional<StorePath>> RemoteStore::queryPartialDerivat
}
return ret;
}
}
std::optional<StorePath> RemoteStore::queryPathFromHashPart(const std::string & hashPart)
@ -525,6 +452,93 @@ std::optional<StorePath> RemoteStore::queryPathFromHashPart(const std::string &
}
ref<const ValidPathInfo> RemoteStore::addCAToStore(
Source & dump,
const string & name,
ContentAddressMethod caMethod,
const StorePathSet & references,
RepairFlag repair)
{
std::optional<ConnectionHandle> conn_(getConnection());
auto & conn = *conn_;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 25) {
conn->to
<< wopAddToStore
<< name
<< renderContentAddressMethod(caMethod);
worker_proto::write(*this, conn->to, references);
conn->to << repair;
conn.withFramedSink([&](Sink & sink) {
dump.drainInto(sink);
});
auto path = parseStorePath(readString(conn->from));
return readValidPathInfo(conn, path);
}
else {
if (repair) throw Error("repairing is not supported when building through the Nix daemon protocol < 1.25");
std::visit(overloaded {
[&](TextHashMethod thm) -> void {
std::string s = dump.drain();
conn->to << wopAddTextToStore << name << s;
worker_proto::write(*this, conn->to, references);
conn.processStderr();
},
[&](FixedOutputHashMethod fohm) -> void {
conn->to
<< wopAddToStore
<< name
<< ((fohm.hashType == htSHA256 && fohm.fileIngestionMethod == FileIngestionMethod::Recursive) ? 0 : 1) /* backwards compatibility hack */
<< (fohm.fileIngestionMethod == FileIngestionMethod::Recursive ? 1 : 0)
<< printHashType(fohm.hashType);
try {
conn->to.written = 0;
conn->to.warn = true;
connections->incCapacity();
{
Finally cleanup([&]() { connections->decCapacity(); });
if (fohm.fileIngestionMethod == FileIngestionMethod::Recursive) {
dump.drainInto(conn->to);
} else {
std::string contents = dump.drain();
dumpString(contents, conn->to);
}
}
conn->to.warn = false;
conn.processStderr();
} catch (SysError & e) {
/* Daemon closed while we were sending the path. Probably OOM
or I/O error. */
if (e.errNo == EPIPE)
try {
conn.processStderr();
} catch (EndOfFile & e) { }
throw;
}
}
}, caMethod);
auto path = parseStorePath(readString(conn->from));
// Release our connection to prevent a deadlock in queryPathInfo().
conn_.reset();
return queryPathInfo(path);
}
}
StorePath RemoteStore::addToStoreFromDump(Source & dump, const string & name,
FileIngestionMethod method, HashType hashType, RepairFlag repair)
{
StorePathSet references;
return addCAToStore(dump, name, FixedOutputHashMethod{ .fileIngestionMethod = method, .hashType = hashType }, references, repair)->path;
}
void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
RepairFlag repair, CheckSigsFlag checkSigs)
{
@ -540,7 +554,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
sink
<< exportMagic
<< printStorePath(info.path);
writeStorePaths(*this, sink, info.references);
worker_proto::write(*this, sink, info.references);
sink
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< 0 // == no legacy signature
@ -550,7 +564,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
conn.processStderr(0, source2.get());
auto importedPaths = readStorePaths<StorePathSet>(*this, conn->from);
auto importedPaths = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
assert(importedPaths.size() <= 1);
}
@ -559,84 +573,15 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
<< printStorePath(info.path)
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< info.narHash.to_string(Base16, false);
writeStorePaths(*this, conn->to, info.references);
worker_proto::write(*this, conn->to, info.references);
conn->to << info.registrationTime << info.narSize
<< info.ultimate << info.sigs << renderContentAddress(info.ca)
<< repair << !checkSigs;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 23) {
conn->to.flush();
std::exception_ptr ex;
struct FramedSink : BufferedSink
{
ConnectionHandle & conn;
std::exception_ptr & ex;
FramedSink(ConnectionHandle & conn, std::exception_ptr & ex) : conn(conn), ex(ex)
{ }
~FramedSink()
{
try {
conn->to << 0;
conn->to.flush();
} catch (...) {
ignoreException();
}
}
void write(const unsigned char * data, size_t len) override
{
/* Don't send more data if the remote has
encountered an error. */
if (ex) {
auto ex2 = ex;
ex = nullptr;
std::rethrow_exception(ex2);
}
conn->to << len;
conn->to(data, len);
};
};
/* Handle log messages / exceptions from the remote on a
separate thread. */
std::thread stderrThread([&]()
{
try {
conn.processStderr(nullptr, nullptr, false);
} catch (...) {
ex = std::current_exception();
}
});
Finally joinStderrThread([&]()
{
if (stderrThread.joinable()) {
stderrThread.join();
if (ex) {
try {
std::rethrow_exception(ex);
} catch (...) {
ignoreException();
}
}
}
});
{
FramedSink sink(conn, ex);
conn.withFramedSink([&](Sink & sink) {
copyNAR(source, sink);
sink.flush();
}
stderrThread.join();
if (ex)
std::rethrow_exception(ex);
});
} else if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 21) {
conn.processStderr(0, &source);
} else {
@ -647,57 +592,11 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
}
StorePath RemoteStore::addToStore(const string & name, const Path & _srcPath,
FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
{
if (repair) throw Error("repairing is not supported when building through the Nix daemon");
auto conn(getConnection());
Path srcPath(absPath(_srcPath));
conn->to
<< wopAddToStore
<< name
<< ((hashAlgo == htSHA256 && method == FileIngestionMethod::Recursive) ? 0 : 1) /* backwards compatibility hack */
<< (method == FileIngestionMethod::Recursive ? 1 : 0)
<< printHashType(hashAlgo);
try {
conn->to.written = 0;
conn->to.warn = true;
connections->incCapacity();
{
Finally cleanup([&]() { connections->decCapacity(); });
dumpPath(srcPath, conn->to, filter);
}
conn->to.warn = false;
conn.processStderr();
} catch (SysError & e) {
/* Daemon closed while we were sending the path. Probably OOM
or I/O error. */
if (e.errNo == EPIPE)
try {
conn.processStderr();
} catch (EndOfFile & e) { }
throw;
}
return parseStorePath(readString(conn->from));
}
StorePath RemoteStore::addTextToStore(const string & name, const string & s,
const StorePathSet & references, RepairFlag repair)
{
if (repair) throw Error("repairing is not supported when building through the Nix daemon");
auto conn(getConnection());
conn->to << wopAddTextToStore << name << s;
writeStorePaths(*this, conn->to, references);
conn.processStderr();
return parseStorePath(readString(conn->from));
StringSource source(s);
return addCAToStore(source, name, TextHashMethod{}, references, repair)->path;
}
@ -796,7 +695,7 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
conn->to
<< wopCollectGarbage << options.action;
writeStorePaths(*this, conn->to, options.pathsToDelete);
worker_proto::write(*this, conn->to, options.pathsToDelete);
conn->to << options.ignoreLiveness
<< options.maxFreed
/* removed options */
@ -858,9 +757,9 @@ void RemoteStore::queryMissing(const std::vector<StorePathWithOutputs> & targets
ss.push_back(p.to_string(*this));
conn->to << ss;
conn.processStderr();
willBuild = readStorePaths<StorePathSet>(*this, conn->from);
willSubstitute = readStorePaths<StorePathSet>(*this, conn->from);
unknown = readStorePaths<StorePathSet>(*this, conn->from);
willBuild = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
willSubstitute = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
unknown = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
conn->from >> downloadSize >> narSize;
return;
}
@ -953,10 +852,14 @@ std::exception_ptr RemoteStore::Connection::processStderr(Sink * sink, Source *
}
else if (msg == STDERR_ERROR) {
if (GET_PROTOCOL_MINOR(daemonVersion) >= 26) {
return std::make_exception_ptr(readError(from));
} else {
string error = readString(from);
unsigned int status = readInt(from);
return std::make_exception_ptr(Error(status, error));
}
}
else if (msg == STDERR_NEXT)
printError(chomp(readString(from)));
@ -993,6 +896,47 @@ std::exception_ptr RemoteStore::Connection::processStderr(Sink * sink, Source *
return nullptr;
}
static RegisterStoreImplementation<UDSRemoteStore, UDSRemoteStoreConfig> regStore;
void ConnectionHandle::withFramedSink(std::function<void(Sink &sink)> fun)
{
(*this)->to.flush();
std::exception_ptr ex;
/* Handle log messages / exceptions from the remote on a
separate thread. */
std::thread stderrThread([&]()
{
try {
processStderr(nullptr, nullptr, false);
} catch (...) {
ex = std::current_exception();
}
});
Finally joinStderrThread([&]()
{
if (stderrThread.joinable()) {
stderrThread.join();
if (ex) {
try {
std::rethrow_exception(ex);
} catch (...) {
ignoreException();
}
}
}
});
{
FramedSink sink((*this)->to, ex);
fun(sink);
sink.flush();
}
stderrThread.join();
if (ex)
std::rethrow_exception(ex);
}
}
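The framing above (also used by `addCAToStore` for the protocol ≥ 1.25 `wopAddToStore` path) is a stream of length-prefixed chunks terminated by a zero length: `FramedSink` writes `len` followed by the bytes, and its destructor writes a final `0`. A minimal sketch of a receiving side, using only the `Source`/`Sink` and `readNum` primitives seen elsewhere in this diff (the daemon's actual decoder is not shown in this commit):

```cpp
// Illustrative only: decode a framed stream as produced by FramedSink.
static void drainFramedStream(Source & from, Sink & to)
{
    while (true) {
        auto len = readNum<uint64_t>(from); // chunk length written by FramedSink
        if (len == 0) break;                // zero-length chunk ends the stream
        std::vector<unsigned char> buf(len);
        from(buf.data(), len);              // read exactly len payload bytes
        to(buf.data(), len);                // forward them
    }
}
```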
View file
@ -63,13 +63,21 @@ public:
void querySubstitutablePathInfos(const StorePathCAMap & paths,
SubstitutablePathInfos & infos) override;
/* Add a content-addressable store path. `dump` will be drained. */
ref<const ValidPathInfo> addCAToStore(
Source & dump,
const string & name,
ContentAddressMethod caMethod,
const StorePathSet & references,
RepairFlag repair);
/* Add a content-addressable store path. Does not support references. `dump` will be drained. */
StorePath addToStoreFromDump(Source & dump, const string & name,
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair) override;
void addToStore(const ValidPathInfo & info, Source & nar,
RepairFlag repair, CheckSigsFlag checkSigs) override;
StorePath addToStore(const string & name, const Path & srcPath,
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256,
PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair) override;
StorePath addTextToStore(const string & name, const string & s,
const StorePathSet & references, RepairFlag repair) override;
@ -139,55 +147,13 @@ protected:
virtual void narFromPath(const StorePath & path, Sink & sink) override;
ref<const ValidPathInfo> readValidPathInfo(ConnectionHandle & conn, const StorePath & path);
private:
std::atomic_bool failed{false};
};
struct UDSRemoteStoreConfig : virtual LocalFSStoreConfig, virtual RemoteStoreConfig
{
UDSRemoteStoreConfig(const Store::Params & params)
: StoreConfig(params)
, LocalFSStoreConfig(params)
, RemoteStoreConfig(params)
{
}
UDSRemoteStoreConfig()
: UDSRemoteStoreConfig(Store::Params({}))
{
}
const std::string name() override { return "Local Daemon Store"; }
};
class UDSRemoteStore : public LocalFSStore, public RemoteStore, public virtual UDSRemoteStoreConfig
{
public:
UDSRemoteStore(const Params & params);
UDSRemoteStore(const std::string scheme, std::string path, const Params & params);
std::string getUri() override;
static std::set<std::string> uriSchemes()
{ return {"unix"}; }
bool sameMachine() override
{ return true; }
ref<FSAccessor> getFSAccessor() override
{ return LocalFSStore::getFSAccessor(); }
void narFromPath(const StorePath & path, Sink & sink) override
{ LocalFSStore::narFromPath(path, sink); }
private:
ref<RemoteStore::Connection> openConnection() override;
std::optional<std::string> path;
};
}
View file
@ -439,7 +439,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore, virtual S3BinaryCache
};
static RegisterStoreImplementation<S3BinaryCacheStoreImpl, S3BinaryCacheStoreConfig> regStore;
static RegisterStoreImplementation<S3BinaryCacheStoreImpl, S3BinaryCacheStoreConfig> regS3BinaryCacheStore;
}
View file
@ -83,6 +83,6 @@ ref<RemoteStore::Connection> SSHStore::openConnection()
return conn;
}
static RegisterStoreImplementation<SSHStore, SSHStoreConfig> regStore;
static RegisterStoreImplementation<SSHStore, SSHStoreConfig> regSSHStore;
}
View file
@ -1028,7 +1028,7 @@ Derivation Store::readDerivation(const StorePath & drvPath)
#include "local-store.hh"
#include "remote-store.hh"
#include "uds-remote-store.hh"
namespace nix {
View file
@ -194,6 +194,8 @@ struct StoreConfig : public Config
*/
StoreConfig() { assert(false); }
virtual ~StoreConfig() { }
virtual const std::string name() = 0;
const PathSetting storeDir_{this, false, settings.nixStore,
@ -451,13 +453,12 @@ public:
/* Like addToStore(), but the contents of the path are contained
in `dump', which is either a NAR serialisation (if recursive ==
true) or simply the contents of a regular file (if recursive ==
false). */
false).
`dump` may be drained */
// FIXME: remove?
virtual StorePath addToStoreFromDump(Source & dump, const string & name,
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair)
{
throw Error("addToStoreFromDump() is not supported by this store");
}
{ unsupported("addToStoreFromDump"); }
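A minimal illustration of the intended usage, assuming `store` is some `Store` implementation that overrides this method (not code from this commit): with `FileIngestionMethod::Flat` the source is simply the file contents, mirroring how `RemoteStore::addTextToStore` now feeds a `StringSource` into `addCAToStore`.

```cpp
std::string contents = "hello, world\n";
StringSource source(contents);
// Flat ingestion: `source` is the raw file contents, not a NAR dump.
StorePath path = store.addToStoreFromDump(
    source, "hello.txt", FileIngestionMethod::Flat, htSHA256);
```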
/* Like addToStore, but the contents written to the output path is
a regular file containing the given string. */
@ -480,8 +481,38 @@ public:
BuildMode buildMode = bmNormal);
/* Build a single non-materialized derivation (i.e. not from an
on-disk .drv file). Note that drvPath is only used for
informational purposes. */
on-disk .drv file).
drvPath is used to deduplicate worker goals, so it is imperative that
it is correct. That said, it doesn't literally need to be the store path
that would be calculated from writing this derivation to the store: it is OK
if it instead is that of a Derivation which would resolve to this (by
taking the outputs of its input derivations and adding them as input
sources) such that the build-time referenceable paths are the same.
In the input-addressed case, we usually *do* use an "original"
unresolved derivation's path, as that is what will be used in the
`buildPaths` case. Also, the input-addressed output paths are verified
only by the contents of that specific unresolved derivation, so it is
nice to keep that information around so if the original derivation is
ever obtained later, it can be verified whether the trusted user in fact
used the proper output path.
In the content-addressed case, we want to always use the
resolved drv path calculated from the provided derivation. This serves
two purposes:
- It keeps the operation trustless, by ruling out a maliciously
invalid drv path corresponding to a non-resolution-equivalent
derivation.
- For the floating case in particular, it ensures that the derivation
to output mapping respects the resolution equivalence relation, so
one cannot choose different resolution-equivalent derivations to
subvert dependency coherence (i.e. the property that one doesn't end
up with multiple different versions of dependencies without
explicitly choosing to allow it).
*/
virtual BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
BuildMode buildMode = bmNormal) = 0;
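As a rough illustration of the resolution described in the comment above (a sketch with a hypothetical `queryOutputPathOf` helper, not code from this commit): resolving replaces each input derivation by the output paths it actually produced, leaving a derivation with only input sources.

```cpp
// Hypothetical helper: look up the realised output path of one derivation output.
StorePath queryOutputPathOf(Store & store, const StorePath & drvPath, const std::string & outputName);

BasicDerivation resolveSketch(Store & store, const Derivation & drv)
{
    BasicDerivation resolved = drv; // keep outputs, env, builder; drop inputDrvs by slicing
    for (auto & [inputDrv, outputNames] : drv.inputDrvs)
        for (auto & outputName : outputNames)
            resolved.inputSrcs.insert(queryOutputPathOf(store, inputDrv, outputName));
    return resolved;
}
```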
@ -518,7 +549,7 @@ public:
- The collector isn't running, or it's just started but hasn't
acquired the GC lock yet. In that case we get and release
the lock right away, then exit. The collector scans the
permanent root and sees our's.
permanent root and sees ours.
In either case the permanent root is seen by the collector. */
virtual void syncWithGC() { };
@ -686,47 +717,6 @@ protected:
};
struct LocalFSStoreConfig : virtual StoreConfig
{
using StoreConfig::StoreConfig;
// FIXME: the (StoreConfig*) cast works around a bug in gcc that causes
// it to omit the call to the Setting constructor. Clang works fine
// either way.
const PathSetting rootDir{(StoreConfig*) this, true, "",
"root", "directory prefixed to all other paths"};
const PathSetting stateDir{(StoreConfig*) this, false,
rootDir != "" ? rootDir + "/nix/var/nix" : settings.nixStateDir,
"state", "directory where Nix will store state"};
const PathSetting logDir{(StoreConfig*) this, false,
rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir,
"log", "directory where Nix will store state"};
};
class LocalFSStore : public virtual Store, public virtual LocalFSStoreConfig
{
public:
const static string drvsLogDir;
LocalFSStore(const Params & params);
void narFromPath(const StorePath & path, Sink & sink) override;
ref<FSAccessor> getFSAccessor() override;
/* Register a permanent GC root. */
Path addPermRoot(const StorePath & storePath, const Path & gcRoot);
virtual Path getRealStoreDir() { return storeDir; }
Path toRealPath(const Path & storePath) override
{
assert(isInStore(storePath));
return getRealStoreDir() + "/" + std::string(storePath, storeDir.size() + 1);
}
std::shared_ptr<std::string> getBuildLog(const StorePath & path) override;
};
/* Copy a path from one store to another. */
void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
@ -803,6 +793,7 @@ struct StoreFactory
std::function<std::shared_ptr<Store> (const std::string & scheme, const std::string & uri, const Store::Params & params)> create;
std::function<std::shared_ptr<StoreConfig> ()> getConfig;
};
struct Implementations
{
static std::vector<StoreFactory> * registered;
View file
@ -0,0 +1,81 @@
#include "uds-remote-store.hh"
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstring>
namespace nix {
UDSRemoteStore::UDSRemoteStore(const Params & params)
: StoreConfig(params)
, Store(params)
, LocalFSStore(params)
, RemoteStore(params)
{
}
UDSRemoteStore::UDSRemoteStore(
const std::string scheme,
std::string socket_path,
const Params & params)
: UDSRemoteStore(params)
{
path.emplace(socket_path);
}
std::string UDSRemoteStore::getUri()
{
if (path) {
return std::string("unix://") + *path;
} else {
return "daemon";
}
}
ref<RemoteStore::Connection> UDSRemoteStore::openConnection()
{
auto conn = make_ref<Connection>();
/* Connect to a daemon that does the privileged work for us. */
conn->fd = socket(PF_UNIX, SOCK_STREAM
#ifdef SOCK_CLOEXEC
| SOCK_CLOEXEC
#endif
, 0);
if (!conn->fd)
throw SysError("cannot create Unix domain socket");
closeOnExec(conn->fd.get());
string socketPath = path ? *path : settings.nixDaemonSocketFile;
struct sockaddr_un addr;
addr.sun_family = AF_UNIX;
if (socketPath.size() + 1 >= sizeof(addr.sun_path))
throw Error("socket path '%1%' is too long", socketPath);
strcpy(addr.sun_path, socketPath.c_str());
if (::connect(conn->fd.get(), (struct sockaddr *) &addr, sizeof(addr)) == -1)
throw SysError("cannot connect to daemon at '%1%'", socketPath);
conn->from.fd = conn->fd.get();
conn->to.fd = conn->fd.get();
conn->startTime = std::chrono::steady_clock::now();
return conn;
}
static RegisterStoreImplementation<UDSRemoteStore, UDSRemoteStoreConfig> regUDSRemoteStore;
}
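Since the `unix` scheme is registered here, a client can reach this store through the generic `openStore` machinery. A small illustration (the socket path is only an example; the default comes from `settings.nixDaemonSocketFile`):

```cpp
#include <cassert>
#include "store-api.hh"

void exampleOpenUnixStore()
{
    // "unix://<path>" now routes to UDSRemoteStore via uriSchemes() above.
    auto store = nix::openStore("unix:///nix/var/nix/daemon-socket/socket");
    assert(store->getUri() == "unix:///nix/var/nix/daemon-socket/socket");
}
```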
View file
@ -0,0 +1,52 @@
#pragma once
#include "remote-store.hh"
#include "local-fs-store.hh"
namespace nix {
struct UDSRemoteStoreConfig : virtual LocalFSStoreConfig, virtual RemoteStoreConfig
{
UDSRemoteStoreConfig(const Store::Params & params)
: StoreConfig(params)
, LocalFSStoreConfig(params)
, RemoteStoreConfig(params)
{
}
UDSRemoteStoreConfig()
: UDSRemoteStoreConfig(Store::Params({}))
{
}
const std::string name() override { return "Local Daemon Store"; }
};
class UDSRemoteStore : public LocalFSStore, public RemoteStore, public virtual UDSRemoteStoreConfig
{
public:
UDSRemoteStore(const Params & params);
UDSRemoteStore(const std::string scheme, std::string path, const Params & params);
std::string getUri() override;
static std::set<std::string> uriSchemes()
{ return {"unix"}; }
bool sameMachine() override
{ return true; }
ref<FSAccessor> getFSAccessor() override
{ return LocalFSStore::getFSAccessor(); }
void narFromPath(const StorePath & path, Sink & sink) override
{ LocalFSStore::narFromPath(path, sink); }
private:
ref<RemoteStore::Connection> openConnection() override;
std::optional<std::string> path;
};
}
View file
@ -6,7 +6,7 @@ namespace nix {
#define WORKER_MAGIC_1 0x6e697863
#define WORKER_MAGIC_2 0x6478696f
#define PROTOCOL_VERSION 0x118
#define PROTOCOL_VERSION 0x11a
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
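(For reference: `0x11a` decodes under these macros to major `0x100` and minor `0x1a`, i.e. protocol 1.26; that minor number is what the `>= 25` check for the framed `wopAddToStore` path and the `>= 26` check for structured daemon errors elsewhere in this diff compare against.)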
@ -18,7 +18,7 @@ typedef enum {
wopQueryReferences = 5, // obsolete
wopQueryReferrers = 6,
wopAddToStore = 7,
wopAddTextToStore = 8,
wopAddTextToStore = 8, // obsolete since 1.25, Nix 3.0. Use wopAddToStore
wopBuildPaths = 9,
wopEnsurePath = 10,
wopAddTempRoot = 11,
@ -66,10 +66,6 @@ typedef enum {
class Store;
struct Source;
template<class T> T readStorePaths(const Store & store, Source & from);
void writeStorePaths(const Store & store, Sink & out, const StorePathSet & paths);
/* To guide overloading */
template<typename T>
struct Phantom {};
@ -78,76 +74,81 @@ struct Phantom {};
namespace worker_proto {
/* FIXME maybe move more stuff inside here */
StorePath read(const Store & store, Source & from, Phantom<StorePath> _);
void write(const Store & store, Sink & out, const StorePath & storePath);
#define MAKE_WORKER_PROTO(TEMPLATE, T) \
TEMPLATE T read(const Store & store, Source & from, Phantom< T > _); \
TEMPLATE void write(const Store & store, Sink & out, const T & str)
template<typename T>
std::map<std::string, T> read(const Store & store, Source & from, Phantom<std::map<std::string, T>> _);
template<typename T>
void write(const Store & store, Sink & out, const std::map<string, T> & resMap);
template<typename T>
std::optional<T> read(const Store & store, Source & from, Phantom<std::optional<T>> _);
template<typename T>
void write(const Store & store, Sink & out, const std::optional<T> & optVal);
MAKE_WORKER_PROTO(, std::string);
MAKE_WORKER_PROTO(, StorePath);
MAKE_WORKER_PROTO(, ContentAddress);
/* Specialization which uses and empty string for the empty case, taking
advantage of the fact StorePaths always serialize to a non-empty string.
This is done primarily for backwards compatability, so that StorePath <=
std::optional<StorePath>, where <= is the compatability partial order.
MAKE_WORKER_PROTO(template<typename T>, std::set<T>);
#define X_ template<typename K, typename V>
#define Y_ std::map<K, V>
MAKE_WORKER_PROTO(X_, Y_);
#undef X_
#undef Y_
/* These use the empty string for the null case, relying on the fact
that the underlying types never serialize to the empty string.
We do this instead of a generic std::optional<T> instance because
ordinal tags (0 or 1, here) are a bit of a compatibility hazard. For
the same reason, we don't have std::variant<T...> instances (ordinal
tags 0...n).
We could have the generic instances and then these as specializations for
compatibility, but that's proven a bit finicky, and it also makes the
worker protocol harder to implement in other languages where such
specializations may not be allowed.
*/
template<>
void write(const Store & store, Sink & out, const std::optional<StorePath> & optVal);
MAKE_WORKER_PROTO(, std::optional<StorePath>);
MAKE_WORKER_PROTO(, std::optional<ContentAddress>);
template<typename T>
std::map<std::string, T> read(const Store & store, Source & from, Phantom<std::map<std::string, T>> _)
std::set<T> read(const Store & store, Source & from, Phantom<std::set<T>> _)
{
std::map<string, T> resMap;
auto size = (size_t)readInt(from);
std::set<T> resSet;
auto size = readNum<size_t>(from);
while (size--) {
auto thisKey = readString(from);
resMap.insert_or_assign(std::move(thisKey), nix::worker_proto::read(store, from, Phantom<T> {}));
resSet.insert(read(store, from, Phantom<T> {}));
}
return resSet;
}
template<typename T>
void write(const Store & store, Sink & out, const std::set<T> & resSet)
{
out << resSet.size();
for (auto & key : resSet) {
write(store, out, key);
}
}
template<typename K, typename V>
std::map<K, V> read(const Store & store, Source & from, Phantom<std::map<K, V>> _)
{
std::map<K, V> resMap;
auto size = readNum<size_t>(from);
while (size--) {
auto k = read(store, from, Phantom<K> {});
auto v = read(store, from, Phantom<V> {});
resMap.insert_or_assign(std::move(k), std::move(v));
}
return resMap;
}
template<typename T>
void write(const Store & store, Sink & out, const std::map<string, T> & resMap)
template<typename K, typename V>
void write(const Store & store, Sink & out, const std::map<K, V> & resMap)
{
out << resMap.size();
for (auto & i : resMap) {
out << i.first;
nix::worker_proto::write(store, out, i.second);
write(store, out, i.first);
write(store, out, i.second);
}
}
template<typename T>
std::optional<T> read(const Store & store, Source & from, Phantom<std::optional<T>> _)
{
auto tag = readNum<uint8_t>(from);
switch (tag) {
case 0:
return std::nullopt;
case 1:
return nix::worker_proto::read(store, from, Phantom<T> {});
default:
throw Error("got an invalid tag bit for std::optional: %#04x", tag);
}
}
template<typename T>
void write(const Store & store, Sink & out, const std::optional<T> & optVal)
{
out << (optVal ? 1 : 0);
if (optVal)
nix::worker_proto::write(store, out, *optVal);
}
}
StorePathCAMap readStorePathCAMap(const Store & store, Source & from);
void writeStorePathCAMap(const Store & store, Sink & out, const StorePathCAMap & paths);
}
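Two conventions worth spelling out from the code above. First, `Phantom<T>` exists only so that the right `read` overload can be selected by its "return type"; callers write e.g. `worker_proto::read(*this, conn->from, Phantom<StorePathSet> {})`, as seen throughout `remote-store.cc`. Second, the empty-string convention for the optional instances declared here would amount to something like the following sketch (an assumption based on the comment above, not the actual definitions shipped in this commit):

```cpp
std::optional<StorePath> read(const Store & store, Source & from, Phantom<std::optional<StorePath>> _)
{
    auto s = readString(from);
    // The empty string stands for "no path"; StorePaths never serialize to "".
    return s == "" ? std::optional<StorePath> {} : store.parseStorePath(s);
}

void write(const Store & store, Sink & out, const std::optional<StorePath> & storePathOpt)
{
    out << (storePathOpt ? store.printStorePath(*storePathOpt) : "");
}
```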
View file
@ -27,11 +27,13 @@ struct ArchiveSettings : Config
#endif
"use-case-hack",
"Whether to enable a Darwin-specific hack for dealing with file name collisions."};
Setting<bool> preallocateContents{this, true, "preallocate-contents",
"Whether to preallocate files when writing objects with known size."};
};
static ArchiveSettings archiveSettings;
static GlobalConfig::Register r1(&archiveSettings);
static GlobalConfig::Register rArchiveSettings(&archiveSettings);
const std::string narVersionMagic1 = "nix-archive-1";
@ -66,9 +68,7 @@ static void dump(const Path & path, Sink & sink, PathFilter & filter)
{
checkInterrupt();
struct stat st;
if (lstat(path.c_str(), &st))
throw SysError("getting attributes of path '%1%'", path);
auto st = lstat(path);
sink << "(";
@ -325,6 +325,9 @@ struct RestoreSink : ParseSink
void preallocateContents(uint64_t len)
{
if (!archiveSettings.preallocateContents)
return;
#if HAVE_POSIX_FALLOCATE
if (len) {
errno = posix_fallocate(fd.get(), 0, len);
View file
@ -17,8 +17,20 @@ void Args::addFlag(Flag && flag_)
if (flag->shortName) shortFlags[flag->shortName] = flag;
}
void Completions::add(std::string completion, std::string description)
{
assert(description.find('\n') == std::string::npos);
insert(Completion {
.completion = completion,
.description = description
});
}
bool Completion::operator<(const Completion & other) const
{ return completion < other.completion || (completion == other.completion && description < other.description); }
bool pathCompletions = false;
std::shared_ptr<std::set<std::string>> completions;
std::shared_ptr<Completions> completions;
std::string completionMarker = "___COMPLETE___";
@ -148,7 +160,7 @@ bool Args::processFlag(Strings::iterator & pos, Strings::iterator end)
for (auto & [name, flag] : longFlags) {
if (!hiddenCategories.count(flag->category)
&& hasPrefix(name, std::string(*prefix, 2)))
completions->insert("--" + name);
completions->add("--" + name, flag->description);
}
}
auto i = longFlags.find(string(*pos, 2));
@ -165,9 +177,9 @@ bool Args::processFlag(Strings::iterator & pos, Strings::iterator end)
if (auto prefix = needsCompletion(*pos)) {
if (prefix == "-") {
completions->insert("--");
for (auto & [flag, _] : shortFlags)
completions->insert(std::string("-") + flag);
completions->add("--");
for (auto & [flagName, flag] : shortFlags)
completions->add(std::string("-") + flagName, flag->description);
}
}
@ -248,7 +260,7 @@ static void hashTypeCompleter(size_t index, std::string_view prefix)
{
for (auto & type : hashTypes)
if (hasPrefix(type, prefix))
completions->insert(type);
completions->add(type);
}
Args::Flag Args::Flag::mkHashTypeFlag(std::string && longName, HashType * ht)
@ -277,7 +289,7 @@ Args::Flag Args::Flag::mkHashTypeOptFlag(std::string && longName, std::optional<
};
}
static void completePath(std::string_view prefix, bool onlyDirs)
static void _completePath(std::string_view prefix, bool onlyDirs)
{
pathCompletions = true;
glob_t globbuf;
@ -292,7 +304,7 @@ static void completePath(std::string_view prefix, bool onlyDirs)
auto st = lstat(globbuf.gl_pathv[i]);
if (!S_ISDIR(st.st_mode)) continue;
}
completions->insert(globbuf.gl_pathv[i]);
completions->add(globbuf.gl_pathv[i]);
}
globfree(&globbuf);
}
@ -300,12 +312,12 @@ static void completePath(std::string_view prefix, bool onlyDirs)
void completePath(size_t, std::string_view prefix)
{
completePath(prefix, false);
_completePath(prefix, false);
}
void completeDir(size_t, std::string_view prefix)
{
completePath(prefix, true);
_completePath(prefix, true);
}
Strings argvToStrings(int argc, char * * argv)
@ -385,7 +397,7 @@ MultiCommand::MultiCommand(const Commands & commands)
if (auto prefix = needsCompletion(s)) {
for (auto & [name, command] : commands)
if (hasPrefix(name, *prefix))
completions->insert(name);
completions->add(name);
}
auto i = commands.find(s);
if (i == commands.end())
View file
@ -192,7 +192,7 @@ public:
{
expectArgs({
.label = label,
.optional = true,
.optional = optional,
.handler = {dest}
});
}
@ -283,7 +283,17 @@ typedef std::vector<std::pair<std::string, std::string>> Table2;
void printTable(std::ostream & out, const Table2 & table);
extern std::shared_ptr<std::set<std::string>> completions;
struct Completion {
std::string completion;
std::string description;
bool operator<(const Completion & other) const;
};
class Completions : public std::set<Completion> {
public:
void add(std::string completion, std::string description = "");
};
extern std::shared_ptr<Completions> completions;
extern bool pathCompletions;
std::optional<std::string> needsCompletion(std::string_view s);
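A small illustration of the new API (not from this commit; `arg` and the description text are made up): completers now attach a human-readable description to each candidate instead of inserting bare strings.

```cpp
if (auto prefix = needsCompletion(arg)) {
    if (hasPrefix("--json", *prefix))
        completions->add("--json", "produce output in JSON format");
}
```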
View file
@ -268,6 +268,26 @@ template<> std::string BaseSetting<StringSet>::to_string() const
return concatStringsSep(" ", value);
}
template<> void BaseSetting<StringMap>::set(const std::string & str)
{
auto kvpairs = tokenizeString<Strings>(str);
for (auto & s : kvpairs)
{
auto eq = s.find_first_of('=');
if (std::string::npos != eq)
value.emplace(std::string(s, 0, eq), std::string(s, eq + 1));
// else ignored
}
}
template<> std::string BaseSetting<StringMap>::to_string() const
{
Strings kvstrs;
std::transform(value.begin(), value.end(), back_inserter(kvstrs),
[&](auto kvpair){ return kvpair.first + "=" + kvpair.second; });
return concatStringsSep(" ", kvstrs);
}
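To make the accepted syntax concrete (an illustrative sketch, not part of this commit): the setter above tokenizes on whitespace and splits each token at the first `=`, silently dropping tokens without one.

```cpp
StringMap parseStringMapExample()
{
    StringMap value;
    for (auto & s : tokenizeString<Strings>("color=auto cores=4 junk"))
        if (auto eq = s.find_first_of('='); eq != std::string::npos)
            value.emplace(std::string(s, 0, eq), std::string(s, eq + 1));
    return value; // {"color" -> "auto", "cores" -> "4"}; "junk" is ignored
}
```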
template class BaseSetting<int>;
template class BaseSetting<unsigned int>;
template class BaseSetting<long>;
@ -278,6 +298,7 @@ template class BaseSetting<bool>;
template class BaseSetting<std::string>;
template class BaseSetting<Strings>;
template class BaseSetting<StringSet>;
template class BaseSetting<StringMap>;
void PathSetting::set(const std::string & str)
{
View file
@ -107,7 +107,7 @@ struct Trace {
struct ErrorInfo {
Verbosity level;
string name;
string description;
string description; // FIXME: remove? it seems to be barely used
std::optional<hintformat> hint;
std::optional<ErrPos> errPos;
std::list<Trace> traces;
@ -169,7 +169,7 @@ public:
#endif
const string & msg() const { return calcWhat(); }
const ErrorInfo & info() { calcWhat(); return err; }
const ErrorInfo & info() const { calcWhat(); return err; }
template<typename... Args>
BaseError & addTrace(std::optional<ErrPos> e, const string &fs, const Args & ... args)
View file
@ -112,6 +112,10 @@ public:
: fmt(hf.fmt)
{ }
hintformat(format && fmt)
: fmt(std::move(fmt))
{ }
template<class T>
hintformat & operator%(const T & value)
{
View file
@ -1,48 +0,0 @@
#include <exception>
#include <functional>
#include <mutex>
namespace nix {
/* A helper class for lazily-initialized variables.
Lazy<T> var([]() { return value; });
declares a variable of type T that is initialized to 'value' (in a
thread-safe way) on first use, that is, when var() is first
called. If the initialiser code throws an exception, then all
subsequent calls to var() will rethrow that exception. */
template<typename T>
class Lazy
{
typedef std::function<T()> Init;
Init init;
std::once_flag done;
T value;
std::exception_ptr ex;
public:
Lazy(Init init) : init(init)
{ }
const T & operator () ()
{
std::call_once(done, [&]() {
try {
value = init();
} catch (...) {
ex = std::current_exception();
}
});
if (ex) std::rethrow_exception(ex);
return value;
}
};
}
Some files were not shown because too many files have changed in this diff