2006-11-30 19:19:59 +00:00
|
|
|
#include "serialise.hh"
|
|
|
|
#include "util.hh"
|
|
|
|
|
2008-05-21 11:17:31 +00:00
|
|
|
#include <cstring>
|
2011-12-15 12:32:08 +00:00
|
|
|
#include <cerrno>
|
2017-01-16 21:24:29 +00:00
|
|
|
#include <memory>
|
2008-05-21 11:17:31 +00:00
|
|
|
|
2018-03-16 19:22:34 +00:00
|
|
|
#include <boost/coroutine2/coroutine.hpp>
|
|
|
|
|
2006-11-30 19:19:59 +00:00
|
|
|
|
|
|
|
namespace nix {
|
|
|
|
|
|
|
|
|
2020-12-02 13:00:43 +00:00
|
|
|
void BufferedSink::operator () (std::string_view data)
|
2006-11-30 19:19:59 +00:00
|
|
|
{
|
2020-12-02 13:00:43 +00:00
|
|
|
if (!buffer) buffer = decltype(buffer)(new char[bufSize]);
|
2015-07-19 23:16:16 +00:00
|
|
|
|
2020-12-02 13:00:43 +00:00
|
|
|
while (!data.empty()) {
|
2011-12-14 23:30:06 +00:00
|
|
|
/* Optimisation: bypass the buffer if the data exceeds the
|
2011-12-16 19:44:13 +00:00
|
|
|
buffer size. */
|
2020-12-02 13:00:43 +00:00
|
|
|
if (bufPos + data.size() >= bufSize) {
|
2011-12-16 19:44:13 +00:00
|
|
|
flush();
|
2020-12-02 13:00:43 +00:00
|
|
|
write(data);
|
2011-12-14 23:30:06 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* Otherwise, copy the bytes to the buffer. Flush the buffer
|
|
|
|
when it's full. */
|
2020-12-02 13:00:43 +00:00
|
|
|
size_t n = bufPos + data.size() > bufSize ? bufSize - bufPos : data.size();
|
|
|
|
memcpy(buffer.get() + bufPos, data.data(), n);
|
|
|
|
data.remove_prefix(n); bufPos += n;
|
2011-12-14 23:30:06 +00:00
|
|
|
if (bufPos == bufSize) flush();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-12-15 16:19:53 +00:00
|
|
|
void BufferedSink::flush()
|
2011-12-14 23:30:06 +00:00
|
|
|
{
|
2011-12-15 16:19:53 +00:00
|
|
|
if (bufPos == 0) return;
|
2011-12-16 15:45:42 +00:00
|
|
|
size_t n = bufPos;
|
|
|
|
bufPos = 0; // don't trigger the assert() in ~BufferedSink()
|
2020-12-02 13:00:43 +00:00
|
|
|
write({buffer.get(), n});
|
2011-12-16 15:45:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Flush any remaining buffered data on destruction; errors while
   flushing are logged and suppressed, since destructors must not
   throw. */
FdSink::~FdSink()
{
    try { flush(); } catch (...) { ignoreException(); }
}
|
|
|
|
|
|
|
|
|
2014-06-10 11:30:09 +00:00
|
|
|
/* Size in bytes (256 MiB) above which warnLargeDump() is triggered. */
size_t threshold = 256 * 1024 * 1024;
|
|
|
|
|
|
|
|
/* Warn that a very large path (> 256 MiB) is being serialised, which
   may exhaust memory. */
static void warnLargeDump()
{
    logWarning({
        .name = "Large path",
        .description = "dumping very large path (> 256 MiB); this may run out of memory"
    });
}
|
|
|
|
|
|
|
|
|
2020-12-02 13:00:43 +00:00
|
|
|
void FdSink::write(std::string_view data)
|
2011-12-15 16:19:53 +00:00
|
|
|
{
|
2020-12-02 13:00:43 +00:00
|
|
|
written += data.size();
|
2014-06-10 11:30:09 +00:00
|
|
|
static bool warned = false;
|
|
|
|
if (warn && !warned) {
|
|
|
|
if (written > threshold) {
|
|
|
|
warnLargeDump();
|
|
|
|
warned = true;
|
|
|
|
}
|
|
|
|
}
|
2016-02-24 10:39:56 +00:00
|
|
|
try {
|
2020-12-02 13:00:43 +00:00
|
|
|
writeFull(fd, data);
|
2016-02-24 10:39:56 +00:00
|
|
|
} catch (SysError & e) {
|
2018-02-13 11:05:25 +00:00
|
|
|
_good = false;
|
|
|
|
throw;
|
2016-02-24 10:39:56 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Return whether all writes to the file descriptor have succeeded so
   far. */
bool FdSink::good()
{
    return _good;
}
|
|
|
|
|
|
|
|
|
2020-12-02 13:10:56 +00:00
|
|
|
void Source::operator () (char * data, size_t len)
|
2011-12-16 19:44:13 +00:00
|
|
|
{
|
|
|
|
while (len) {
|
|
|
|
size_t n = read(data, len);
|
|
|
|
data += n; len -= n;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-09-13 12:39:11 +00:00
|
|
|
/* Read from this source until end-of-file, forwarding everything
   read into 'sink' in 8 KiB chunks. */
void Source::drainInto(Sink & sink)
{
    std::vector<char> buf(8192);
    while (true) {
        size_t n;
        try {
            n = read(buf.data(), buf.size());
            sink({buf.data(), n});
        } catch (EndOfFile &) {
            /* EndOfFile terminates the drain normally. */
            break;
        }
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Read this source to end-of-file and return everything read as a
   single string. */
std::string Source::drain()
{
    StringSink s;
    drainInto(s);
    return *s.s;
}
|
|
|
|
|
|
|
|
|
2020-12-02 13:10:56 +00:00
|
|
|
size_t BufferedSource::read(char * data, size_t len)
|
2006-11-30 19:19:59 +00:00
|
|
|
{
|
2020-12-02 13:10:56 +00:00
|
|
|
if (!buffer) buffer = decltype(buffer)(new char[bufSize]);
|
2011-12-15 12:32:08 +00:00
|
|
|
|
2016-07-13 10:03:37 +00:00
|
|
|
if (!bufPosIn) bufPosIn = readUnbuffered(buffer.get(), bufSize);
|
2015-07-19 23:16:16 +00:00
|
|
|
|
2011-12-16 19:44:13 +00:00
|
|
|
/* Copy out the data in the buffer. */
|
|
|
|
size_t n = len > bufPosIn - bufPosOut ? bufPosIn - bufPosOut : len;
|
2016-07-13 10:03:37 +00:00
|
|
|
memcpy(data, buffer.get() + bufPosOut, n);
|
2011-12-16 19:44:13 +00:00
|
|
|
bufPosOut += n;
|
|
|
|
if (bufPosIn == bufPosOut) bufPosIn = bufPosOut = 0;
|
|
|
|
return n;
|
2006-11-30 19:19:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-06-07 13:02:14 +00:00
|
|
|
/* Return whether unconsumed data remains in the internal buffer. */
bool BufferedSource::hasData()
{
    return bufPosOut < bufPosIn;
}
|
|
|
|
|
|
|
|
|
2020-12-02 13:10:56 +00:00
|
|
|
size_t FdSource::readUnbuffered(char * data, size_t len)
|
2011-12-15 16:19:53 +00:00
|
|
|
{
|
|
|
|
ssize_t n;
|
|
|
|
do {
|
|
|
|
checkInterrupt();
|
2020-12-02 13:10:56 +00:00
|
|
|
n = ::read(fd, data, len);
|
2011-12-15 16:19:53 +00:00
|
|
|
} while (n == -1 && errno == EINTR);
|
2016-02-24 10:39:56 +00:00
|
|
|
if (n == -1) { _good = false; throw SysError("reading from file"); }
|
|
|
|
if (n == 0) { _good = false; throw EndOfFile("unexpected end-of-file"); }
|
2016-02-26 15:16:08 +00:00
|
|
|
read += n;
|
2011-12-15 16:19:53 +00:00
|
|
|
return n;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-02-24 10:39:56 +00:00
|
|
|
/* Return whether all reads from the file descriptor have succeeded so
   far. */
bool FdSource::good()
{
    return _good;
}
|
|
|
|
|
|
|
|
|
2020-12-02 13:10:56 +00:00
|
|
|
size_t StringSource::read(char * data, size_t len)
|
2011-12-16 19:44:13 +00:00
|
|
|
{
|
|
|
|
if (pos == s.size()) throw EndOfFile("end of string reached");
|
2020-12-02 13:10:56 +00:00
|
|
|
size_t n = s.copy(data, len, pos);
|
2011-12-16 19:44:13 +00:00
|
|
|
pos += n;
|
|
|
|
return n;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-07-31 14:35:05 +00:00
|
|
|
#if BOOST_VERSION >= 106300 && BOOST_VERSION < 106600
|
|
|
|
#error Coroutines are broken in this version of Boost!
|
|
|
|
#endif
|
|
|
|
|
2020-10-30 19:55:53 +00:00
|
|
|
/* A concrete datatype allowing virtual dispatch of stack allocation
   methods: forwards allocate()/deallocate() to whatever
   StackAllocator::defaultAllocator currently points to. */
struct VirtualStackAllocator {
    StackAllocator *allocator = StackAllocator::defaultAllocator;

    boost::context::stack_context allocate() {
        return allocator->allocate();
    }

    void deallocate(boost::context::stack_context sctx) {
        allocator->deallocate(sctx);
    }
};
|
|
|
|
|
|
|
|
|
|
|
|
/* This class reifies the default boost coroutine stack allocation strategy with
|
|
|
|
a virtual interface. */
|
|
|
|
class DefaultStackAllocator : public StackAllocator {
|
|
|
|
boost::coroutines2::default_stack stack;
|
|
|
|
|
|
|
|
boost::context::stack_context allocate() {
|
|
|
|
return stack.allocate();
|
|
|
|
}
|
|
|
|
|
|
|
|
void deallocate(boost::context::stack_context sctx) {
|
2020-11-10 03:24:55 +00:00
|
|
|
stack.deallocate(sctx);
|
2020-10-30 19:55:53 +00:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Process-wide singleton providing the default coroutine stack
   allocation strategy; callers may repoint
   StackAllocator::defaultAllocator at a different implementation. */
static DefaultStackAllocator defaultAllocatorSingleton;

StackAllocator *StackAllocator::defaultAllocator = &defaultAllocatorSingleton;
|
|
|
|
|
|
|
|
|
2018-08-21 13:22:04 +00:00
|
|
|
/* Adapt a producer function that writes to a Sink into a Source from
   which the produced data can be read back on demand. The producer
   runs inside a coroutine that is suspended each time it yields a
   chunk and resumed when the reader asks for more. 'eof' is invoked
   when the reader requests data after the producer has finished. */
std::unique_ptr<Source> sinkToSource(
    std::function<void(Sink &)> fun,
    std::function<void()> eof)
{
    struct SinkToSource : Source
    {
        typedef boost::coroutines2::coroutine<std::string> coro_t;

        std::function<void(Sink &)> fun;
        std::function<void()> eof;
        // The pull side of the coroutine; empty until the first read().
        std::optional<coro_t::pull_type> coro;
        bool started = false;

        SinkToSource(std::function<void(Sink &)> fun, std::function<void()> eof)
            : fun(fun), eof(eof)
        {
        }

        // Chunk most recently obtained from the coroutine, and how
        // much of it has already been handed to the reader.
        std::string cur;
        size_t pos = 0;

        size_t read(char * data, size_t len) override
        {
            /* Lazily start the coroutine on the first read. Every
               non-empty chunk the producer writes to the sink is
               yielded to the pull side. */
            if (!coro)
                coro = coro_t::pull_type(VirtualStackAllocator{}, [&](coro_t::push_type & yield) {
                    LambdaSink sink([&](std::string_view data) {
                        if (!data.empty()) yield(std::string(data));
                    });
                    fun(sink);
                });

            /* Coroutine finished: no more data. eof() is expected not
               to return normally (hence the abort()). */
            if (!*coro) { eof(); abort(); }

            /* Current chunk exhausted: resume the coroutine to fetch
               the next one. */
            if (pos == cur.size()) {
                if (!cur.empty()) (*coro)();
                cur = coro->get();
                pos = 0;
            }

            /* Copy out as much of the current chunk as fits. */
            auto n = std::min(cur.size() - pos, len);
            memcpy(data, cur.data() + pos, n);
            pos += n;

            return n;
        }
    };

    return std::make_unique<SinkToSource>(fun, eof);
}
|
|
|
|
|
|
|
|
|
2011-12-15 16:19:53 +00:00
|
|
|
void writePadding(size_t len, Sink & sink)
|
2006-11-30 19:19:59 +00:00
|
|
|
{
|
|
|
|
if (len % 8) {
|
2020-12-02 13:10:56 +00:00
|
|
|
char zero[8];
|
2006-11-30 19:19:59 +00:00
|
|
|
memset(zero, 0, sizeof(zero));
|
2020-12-02 13:10:56 +00:00
|
|
|
sink({zero, 8 - (len % 8)});
|
2006-11-30 19:19:59 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-12-02 13:00:43 +00:00
|
|
|
void writeString(std::string_view data, Sink & sink)
|
2006-11-30 19:19:59 +00:00
|
|
|
{
|
2020-12-02 13:00:43 +00:00
|
|
|
sink << data.size();
|
|
|
|
sink(data);
|
|
|
|
writePadding(data.size(), sink);
|
2006-11-30 19:19:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-07-19 23:16:16 +00:00
|
|
|
/* Write a string in the serialisation format (length + data +
   padding); returns the sink to allow chaining. */
Sink & operator << (Sink & sink, const string & s)
{
    writeString(s, sink);
    return sink;
}
|
|
|
|
|
2011-12-16 21:29:46 +00:00
|
|
|
|
2011-12-16 22:31:25 +00:00
|
|
|
/* Serialise a container of strings as a count followed by each
   element. */
template<class T> void writeStrings(const T & ss, Sink & sink)
{
    sink << ss.size();
    for (const auto & elem : ss) sink << elem;
}
|
|
|
|
|
2015-07-19 23:16:16 +00:00
|
|
|
/* Write a list of strings (count + elements); returns the sink to
   allow chaining. */
Sink & operator << (Sink & sink, const Strings & s)
{
    writeStrings(s, sink);
    return sink;
}
|
|
|
|
|
2015-07-19 23:16:16 +00:00
|
|
|
/* Write a set of strings (count + elements); returns the sink to
   allow chaining. */
Sink & operator << (Sink & sink, const StringSet & s)
{
    writeStrings(s, sink);
    return sink;
}
|
|
|
|
|
2020-10-07 14:34:03 +00:00
|
|
|
/* Serialise an Error for transmission over the wire. The field order
   here must stay in sync with readError() below. Source positions are
   not serialised yet: a 0 placeholder is written instead. */
Sink & operator << (Sink & sink, const Error & ex)
{
    auto info = ex.info();
    sink
        << "Error"
        << info.level
        << info.name
        << info.description
        << (info.hint ? info.hint->str() : "")
        << 0 // FIXME: info.errPos
        << info.traces.size();
    for (auto & trace : info.traces) {
        sink << 0; // FIXME: trace.pos
        sink << trace.hint.str();
    }
    return sink;
}
|
|
|
|
|
2006-11-30 22:43:55 +00:00
|
|
|
|
2011-12-15 16:19:53 +00:00
|
|
|
void readPadding(size_t len, Source & source)
|
2006-11-30 19:19:59 +00:00
|
|
|
{
|
|
|
|
if (len % 8) {
|
2020-12-02 13:10:56 +00:00
|
|
|
char zero[8];
|
2011-12-15 16:19:53 +00:00
|
|
|
size_t n = 8 - (len % 8);
|
2006-11-30 19:19:59 +00:00
|
|
|
source(zero, n);
|
|
|
|
for (unsigned int i = 0; i < n; i++)
|
2009-03-22 17:36:43 +00:00
|
|
|
if (zero[i]) throw SerialisationError("non-zero padding");
|
2006-11-30 19:19:59 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-12-02 13:10:56 +00:00
|
|
|
/* Read a serialised string of at most 'max' bytes into 'buf' (which
   must have room for 'max' bytes) and return its length. */
size_t readString(char * buf, size_t max, Source & source)
{
    auto len = readNum<size_t>(source);
    /* Guard against corrupt/malicious input announcing a huge length. */
    if (len > max) throw SerialisationError("string is too long");
    source(buf, len);
    readPadding(len, source);
    return len;
}
|
|
|
|
|
2015-07-19 23:16:16 +00:00
|
|
|
|
2018-09-26 10:03:58 +00:00
|
|
|
/* Read a serialised string of at most 'max' bytes and return it. */
string readString(Source & source, size_t max)
{
    auto len = readNum<size_t>(source);
    /* Guard against corrupt/malicious input announcing a huge length. */
    if (len > max) throw SerialisationError("string is too long");
    std::string res(len, 0);
    source(res.data(), len);
    readPadding(len, source);
    return res;
}
|
|
|
|
|
Allow remote builds without sending the derivation closure
Previously, to build a derivation remotely, we had to copy the entire
closure of the .drv file to the remote machine, even though we only
need the top-level derivation. This is very wasteful: the closure can
contain thousands of store paths, and in some Hydra use cases, include
source paths that are very large (e.g. Git/Mercurial checkouts).
So now there is a new operation, StoreAPI::buildDerivation(), that
performs a build from an in-memory representation of a derivation
(BasicDerivation) rather than from a on-disk .drv file. The only files
that need to be in the Nix store are the sources of the derivation
(drv.inputSrcs), and the needed output paths of the dependencies (as
described by drv.inputDrvs). "nix-store --serve" exposes this
interface.
Note that this is a privileged operation, because you can construct a
derivation that builds any store path whatsoever. Fixing this will
require changing the hashing scheme (i.e., the output paths should be
computed from the other fields in BasicDerivation, allowing them to be
verified without access to other derivations). However, this would be
quite nice because it would allow .drv-free building (e.g. "nix-env
-i" wouldn't have to write any .drv files to disk).
Fixes #173.
2015-07-17 15:57:40 +00:00
|
|
|
/* Read a serialised string into 's'; returns the source to allow
   chaining. */
Source & operator >> (Source & in, string & s)
{
    s = readString(in);
    return in;
}
|
|
|
|
|
|
|
|
|
2011-12-16 22:31:25 +00:00
|
|
|
template<class T> T readStrings(Source & source)
|
2006-11-30 22:43:55 +00:00
|
|
|
{
|
2017-03-01 12:52:54 +00:00
|
|
|
auto count = readNum<size_t>(source);
|
2011-12-16 22:31:25 +00:00
|
|
|
T ss;
|
2006-11-30 22:43:55 +00:00
|
|
|
while (count--)
|
2011-12-16 22:31:25 +00:00
|
|
|
ss.insert(ss.end(), readString(source));
|
2006-11-30 22:43:55 +00:00
|
|
|
return ss;
|
|
|
|
}
|
|
|
|
|
2011-12-16 22:31:25 +00:00
|
|
|
/* Explicit instantiations for the container types used by callers. */
template Paths readStrings(Source & source);
template PathSet readStrings(Source & source);
|
|
|
|
|
2006-11-30 22:43:55 +00:00
|
|
|
|
2020-10-07 14:34:03 +00:00
|
|
|
/* Deserialise an Error previously written by operator << (Sink &,
   const Error &); the read order must mirror that writer exactly.
   Positions are not transferred yet, so the position flags are
   asserted to be 0. */
Error readError(Source & source)
{
    auto type = readString(source);
    assert(type == "Error");
    ErrorInfo info;
    info.level = (Verbosity) readInt(source);
    info.name = readString(source);
    info.description = readString(source);
    auto hint = readString(source);
    /* An empty hint string means "no hint". */
    if (hint != "") info.hint = hintformat(std::move(format("%s") % hint));
    auto havePos = readNum<size_t>(source);
    assert(havePos == 0);
    auto nrTraces = readNum<size_t>(source);
    for (size_t i = 0; i < nrTraces; ++i) {
        havePos = readNum<size_t>(source);
        assert(havePos == 0);
        info.traces.push_back(Trace {
            .hint = hintformat(std::move(format("%s") % readString(source)))
        });
    }
    return Error(std::move(info));
}
|
|
|
|
|
|
|
|
|
2020-12-02 13:00:43 +00:00
|
|
|
void StringSink::operator () (std::string_view data)
|
2014-06-10 11:30:09 +00:00
|
|
|
{
|
|
|
|
static bool warned = false;
|
2016-03-04 15:49:56 +00:00
|
|
|
if (!warned && s->size() > threshold) {
|
2014-06-10 11:30:09 +00:00
|
|
|
warnLargeDump();
|
|
|
|
warned = true;
|
|
|
|
}
|
2020-12-02 13:00:43 +00:00
|
|
|
s->append(data);
|
2014-06-10 11:30:09 +00:00
|
|
|
}
|
|
|
|
|
2020-12-02 13:10:56 +00:00
|
|
|
size_t ChainSource::read(char * data, size_t len)
|
2020-07-15 23:14:30 +00:00
|
|
|
{
|
|
|
|
if (useSecond) {
|
|
|
|
return source2.read(data, len);
|
|
|
|
} else {
|
|
|
|
try {
|
|
|
|
return source1.read(data, len);
|
|
|
|
} catch (EndOfFile &) {
|
|
|
|
useSecond = true;
|
|
|
|
return this->read(data, len);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2014-06-10 11:30:09 +00:00
|
|
|
|
2006-11-30 19:19:59 +00:00
|
|
|
}
|