lix/scripts/download-from-binary-cache.pl.in
Eelco Dolstra 11800e6198 download-from-binary-cache: parallelise fetching of NAR info files
Getting substitute information using the binary cache substituter has
non-trivial latency overhead.  A package or NixOS system configuration
can have hundreds of dependencies, and in the worst case (when the
local info cache is empty) we have to do a separate HTTP request for
each of these.  If the ping time to the server is t, getting N info
files will take tN seconds; e.g., with a ping time of 0.1s to
nixos.org, sequentially downloading 1000 info files (a typical NixOS
config) will take at least 100 seconds.

To fix this problem, the binary cache substituter can now perform
requests in parallel.  This required changing the substituter
interface to support a function querySubstitutablePathInfos() that
queries multiple paths at the same time, and rewriting queryMissing()
to take advantage of parallelism.  (Due to local caching,
parallelising queryMissing() is sufficient for most use cases, since
it's almost always called before building a derivation and thus fills
the local info cache.)
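
(In outline, the parallel fetch is just a WWW::Curl::Multi handle with one
easy handle per .narinfo URL.  The sketch below is illustrative only; the
cache URL and the list of path hashes are placeholders, and the real script
further down adds the connection cap, the local SQLite cache, scheduling of
deferred requests and error handling.)

    use strict;
    use IO::Select;
    use WWW::Curl::Easy;
    use WWW::Curl::Multi;

    my $binaryCacheUrl = "http://example.org/binary-cache";  # placeholder cache URL
    my @pathHashes = @ARGV;                                   # placeholder store path hashes

    my $curlm = WWW::Curl::Multi->new;
    my (%handles, %content);
    my $id = 0;

    # Issue all .narinfo requests at once.
    foreach my $pathHash (@pathHashes) {
        my $curl = WWW::Curl::Easy->new;
        $id++;
        $content{$id} = "";
        $handles{$id} = $curl;   # keep a reference so the easy handle stays alive
        $curl->setopt(CURLOPT_PRIVATE, $id);
        $curl->setopt(CURLOPT_URL, "$binaryCacheUrl/$pathHash.narinfo");
        $curl->setopt(CURLOPT_WRITEDATA, \$content{$id});
        $curlm->add_handle($curl);
    }

    # Drive all transfers in parallel until every request has completed.
    my $active = $id;
    while ($active) {
        my ($rfds, $wfds, $efds) = $curlm->fdset();
        if (@$rfds + @$wfds + @$efds > 0) {
            IO::Select->select(IO::Select->new(@$rfds), IO::Select->new(@$wfds),
                               IO::Select->new(@$efds), 0.1);
        }
        if ($curlm->perform() != $active) {
            while (my ($doneId, $result) = $curlm->info_read) {
                if ($doneId) {
                    $active--;
                    # $content{$doneId} now holds the .narinfo body (or an error page).
                }
            }
        }
    }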

For example, parallelism speeds up querying all 1056 paths in a
particular NixOS system configuration from 116s to 2.6s.  It works so
well because the eccentricity of the top-level derivation in the
dependency graph is only 9.  So we only need 10 round-trips (when
using an unlimited number of parallel connections) to get everything.

Currently we do a maximum of 150 parallel connections to the server.
Thus it's important that the binary cache server (e.g. nixos.org) has
a high connection limit.  Alternatively we could use HTTP pipelining,
but WWW::Curl doesn't support it and libcurl has a hard-coded limit of
5 requests per pipeline.
2012-07-06 19:08:20 -04:00
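
For orientation before the code: this script is run by nix-store as an
external substituter and speaks a simple line-based protocol on stdin/stdout.
With --query, nix-store sends commands such as "have" (followed by a store
path on the next line) or "info" with store paths as arguments; for each path
a cache can supply, the reply repeats the store path, then the deriver (or an
empty line), the number of references, the references themselves, the
compressed file size and the NAR size, and a final empty line ends the reply.
A hypothetical "info" exchange (paths and sizes invented for illustration)
might look like this:

    info /nix/store/aaaa...-hello-2.8
    /nix/store/aaaa...-hello-2.8
    /nix/store/bbbb...-hello-2.8.drv
    2
    /nix/store/cccc...-glibc-2.13
    /nix/store/dddd...-gcc-4.6.3
    4860
    24120
    (empty line terminating the reply)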

#! @perl@ -w @perlFlags@

use DBI;
use File::Basename;
use IO::Select;
use Nix::Config;
use Nix::Store;
use WWW::Curl::Easy;
use WWW::Curl::Multi;
use strict;

my @binaryCacheUrls = map { s/\/+$//; $_ } split(/ /, ($ENV{"NIX_BINARY_CACHES"} || ""));

my $maxParallelRequests = 150;

my ($dbh, $insertNAR, $queryNAR, $insertNegativeNAR, $queryNegativeNAR);
my %cacheIds;

my $curlm = WWW::Curl::Multi->new;
my $activeRequests = 0;
my $curlIdCount = 1;
my %requests;
my %scheduled;
my $caBundle = $ENV{"CURL_CA_BUNDLE"} || $ENV{"OPENSSL_X509_CERT_FILE"};

# Queue a GET request for $url.  If we are already at the limit of
# parallel connections, the request is only scheduled; it is added to
# the multi handle once an active request finishes.
sub addRequest {
    my ($storePath, $url) = @_;

    my $curl = WWW::Curl::Easy->new;
    my $curlId = $curlIdCount++;
    $requests{$curlId} = { storePath => $storePath, url => $url, handle => $curl, content => "" };

    $curl->setopt(CURLOPT_PRIVATE, $curlId);
    $curl->setopt(CURLOPT_URL, $url);
    $curl->setopt(CURLOPT_WRITEDATA, \$requests{$curlId}->{content});
    $curl->setopt(CURLOPT_FOLLOWLOCATION, 1);
    $curl->setopt(CURLOPT_CAINFO, $caBundle) if defined $caBundle;

    if ($activeRequests >= $maxParallelRequests) {
        $scheduled{$curlId} = 1;
    } else {
        $curlm->add_handle($curl);
        $activeRequests++;
    }

    return $requests{$curlId};
}

sub processRequests {
    while ($activeRequests) {
        my ($rfds, $wfds, $efds) = $curlm->fdset();
        #print STDERR "R = @{$rfds}, W = @{$wfds}, E = @{$efds}\n";

        # Sleep until we can read or write some data.
        if (scalar @{$rfds} + scalar @{$wfds} + scalar @{$efds} > 0) {
            IO::Select->select(IO::Select->new(@{$rfds}), IO::Select->new(@{$wfds}), IO::Select->new(@{$efds}), 0.1);
        }

        if ($curlm->perform() != $activeRequests) {
            while (my ($id, $result) = $curlm->info_read) {
                if ($id) {
                    my $handle = $requests{$id}->{handle};
                    $requests{$id}->{result} = $result;
                    $requests{$id}->{httpStatus} = $handle->getinfo(CURLINFO_HTTP_CODE);
                    #print STDERR "\nRequest completed ($id, $result, $requests{$id}->{httpStatus})\n";
                    $activeRequests--;
                    delete $requests{$id}->{handle};

                    if (scalar(keys %scheduled) > 0) {
                        my $id2 = (keys %scheduled)[0];
                        $curlm->add_handle($requests{$id2}->{handle});
                        $activeRequests++;
                        delete $scheduled{$id2};
                    }
                }
            }
        }
    }
}

sub initCache {
    my $dbPath = "$Nix::Config::stateDir/binary-cache-v1.sqlite";

    # Open/create the database.
    $dbh = DBI->connect("dbi:SQLite:dbname=$dbPath", "", "")
        or die "cannot open database `$dbPath'";
    $dbh->{RaiseError} = 1;
    $dbh->{PrintError} = 0;

    $dbh->do("pragma synchronous = off"); # we can always reproduce the cache
    $dbh->do("pragma journal_mode = truncate");

    # Initialise the database schema, if necessary.
    $dbh->do(<<EOF);
create table if not exists BinaryCaches (
    id   integer primary key autoincrement not null,
    url  text unique not null
);
EOF

    $dbh->do(<<EOF);
create table if not exists NARs (
    cache       integer not null,
    storePath   text not null,
    url         text not null,
    compression text not null,
    fileHash    text,
    fileSize    integer,
    narHash     text,
    narSize     integer,
    refs        text,
    deriver     text,
    system      text,
    timestamp   integer not null,
    primary key (cache, storePath),
    foreign key (cache) references BinaryCaches(id) on delete cascade
);
EOF

    $dbh->do(<<EOF);
create table if not exists NegativeNARs (
    cache       integer not null,
    storePath   text not null,
    timestamp   integer not null,
    primary key (cache, storePath),
    foreign key (cache) references BinaryCaches(id) on delete cascade
);
EOF

    $insertNAR = $dbh->prepare(
        "insert or replace into NARs(cache, storePath, url, compression, fileHash, fileSize, narHash, " .
        "narSize, refs, deriver, system, timestamp) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") or die;

    $queryNAR = $dbh->prepare("select * from NARs where cache = ? and storePath = ?") or die;

    $insertNegativeNAR = $dbh->prepare(
        "insert or replace into NegativeNARs(cache, storePath, timestamp) values (?, ?, ?)") or die;

    $queryNegativeNAR = $dbh->prepare("select 1 from NegativeNARs where cache = ? and storePath = ?") or die;
}

# Return true if we have previously recorded that $binaryCacheUrl does
# not have $storePath.
sub negativeHit {
    my ($storePath, $binaryCacheUrl) = @_;
    $queryNegativeNAR->execute(getCacheId($binaryCacheUrl), basename($storePath));
    return @{$queryNegativeNAR->fetchall_arrayref()} != 0;
}

# Parse the .narinfo file fetched by $request, cache the result, and
# return the substitute info.  Returns undef if the request failed or
# the file is malformed.
sub processNARInfo {
    my ($storePath, $binaryCacheUrl, $request) = @_;

    my $cacheId = getCacheId($binaryCacheUrl);

    if ($request->{result} != 0 || $request->{httpStatus} != 200) {
        if ($request->{httpStatus} != 404) {
            print STDERR "could not download $request->{url} (" .
                ($request->{result} != 0 ? "Curl error $request->{result}" : "HTTP status $request->{httpStatus}") . ")\n";
        } else {
            $insertNegativeNAR->execute($cacheId, basename($storePath), time());
        }
        return undef;
    }

    my ($storePath2, $url, $fileHash, $fileSize, $narHash, $narSize, $deriver, $system);
    my $compression = "bzip2";
    my @refs;
    foreach my $line (split "\n", $request->{content}) {
        $line =~ /^(.*): (.*)$/ or return undef;
        if ($1 eq "StorePath") { $storePath2 = $2; }
        elsif ($1 eq "URL") { $url = $2; }
        elsif ($1 eq "Compression") { $compression = $2; }
        elsif ($1 eq "FileHash") { $fileHash = $2; }
        elsif ($1 eq "FileSize") { $fileSize = int($2); }
        elsif ($1 eq "NarHash") { $narHash = $2; }
        elsif ($1 eq "NarSize") { $narSize = int($2); }
        elsif ($1 eq "References") { @refs = split / /, $2; }
        elsif ($1 eq "Deriver") { $deriver = $2; }
        elsif ($1 eq "System") { $system = $2; }
    }

    return undef if !defined $storePath2 || $storePath2 ne $storePath;
    if (!defined $url || !defined $narHash) {
        print STDERR "bad NAR info file $request->{url}\n";
        return undef;
    }

    # Cache the result.
    $insertNAR->execute(
        $cacheId, basename($storePath), $url, $compression, $fileHash, $fileSize,
        $narHash, $narSize, join(" ", @refs), $deriver, $system, time());

    return
        { url => $url
        , compression => $compression
        , fileHash => $fileHash
        , fileSize => $fileSize
        , narHash => $narHash
        , narSize => $narSize
        , refs => [ @refs ]
        , deriver => $deriver
        , system => $system
        };
}

sub getCacheId {
    my ($binaryCacheUrl) = @_;

    my $cacheId = $cacheIds{$binaryCacheUrl};
    return $cacheId if defined $cacheId;

    # FIXME: not atomic.
    my @res = @{$dbh->selectcol_arrayref("select id from BinaryCaches where url = ?", {}, $binaryCacheUrl)};
    if (scalar @res == 1) {
        $cacheId = $res[0];
    } else {
        $dbh->do("insert into BinaryCaches(url) values (?)",
                 {}, $binaryCacheUrl);
        $cacheId = $dbh->last_insert_id("", "", "", "");
    }

    $cacheIds{$binaryCacheUrl} = $cacheId;

    return $cacheId;
}

# Look up $storePath in the local SQLite cache for $binaryCacheUrl.
sub cachedGetInfoFrom {
    my ($storePath, $pathHash, $binaryCacheUrl) = @_;

    $queryNAR->execute(getCacheId($binaryCacheUrl), basename($storePath));
    my $res = $queryNAR->fetchrow_hashref();
    return undef unless defined $res;

    return
        { url => $res->{url}
        , compression => $res->{compression}
        , fileHash => $res->{fileHash}
        , fileSize => $res->{fileSize}
        , narHash => $res->{narHash}
        , narSize => $res->{narSize}
        , refs => [ split " ", $res->{refs} ]
        , deriver => $res->{deriver}
        };
}

# Print the substitute info for $storePath on stdout in the format
# expected by nix-store.
sub printInfo {
    my ($storePath, $info) = @_;

    print "$storePath\n";
    print $info->{deriver} ? "$Nix::Config::storeDir/$info->{deriver}" : "", "\n";
    print scalar @{$info->{refs}}, "\n";
    print "$Nix::Config::storeDir/$_\n" foreach @{$info->{refs}};
    print $info->{fileSize} || 0, "\n";
    print $info->{narSize} || 0, "\n";
}

sub printInfoParallel {
    my @paths = @_;

    # First print all paths for which we have cached info.
    my @left;
    foreach my $storePath (@paths) {
        my $pathHash = substr(basename($storePath), 0, 32);
        my $found = 0;
        foreach my $binaryCacheUrl (@binaryCacheUrls) {
            my $info = cachedGetInfoFrom($storePath, $pathHash, $binaryCacheUrl);
            if (defined $info) {
                printInfo($storePath, $info);
                $found = 1;
                last;
            }
        }
        push @left, $storePath if !$found;
    }

    return if scalar @left == 0;

    foreach my $binaryCacheUrl (@binaryCacheUrls) {
        my @left2;
        %requests = ();
        foreach my $storePath (@left) {
            my $pathHash = substr(basename($storePath), 0, 32);
            if (negativeHit($storePath, $binaryCacheUrl)) {
                push @left2, $storePath;
                next;
            }
            my $infoUrl = "$binaryCacheUrl/$pathHash.narinfo";
            addRequest($storePath, $infoUrl);
        }

        processRequests;

        foreach my $request (values %requests) {
            my $info = processNARInfo($request->{storePath}, $binaryCacheUrl, $request);
            if (defined $info) {
                printInfo($request->{storePath}, $info);
            } else {
                push @left2, $request->{storePath};
            }
        }

        @left = @left2;
    }
}

sub downloadBinary {
    my ($storePath) = @_;

    my $pathHash = substr(basename($storePath), 0, 32);

    cache: foreach my $binaryCacheUrl (@binaryCacheUrls) {
        my $info = cachedGetInfoFrom($storePath, $pathHash, $binaryCacheUrl);

        unless (defined $info) {
            next if negativeHit($storePath, $binaryCacheUrl);
            my $request = addRequest($storePath, "$binaryCacheUrl/$pathHash.narinfo");
            processRequests;
            $info = processNARInfo($storePath, $binaryCacheUrl, $request);
        }

        next unless defined $info;

        my $decompressor;
        if ($info->{compression} eq "bzip2") { $decompressor = "$Nix::Config::bzip2 -d"; }
        elsif ($info->{compression} eq "xz") { $decompressor = "$Nix::Config::xz -d"; }
        else {
            print STDERR "unknown compression method $info->{compression}\n";
            next;
        }

        print STDERR "\n*** Downloading $info->{url} into $storePath...\n";
        if (system("$Nix::Config::curl --fail --location '$binaryCacheUrl/$info->{url}' | $decompressor | $Nix::Config::binDir/nix-store --restore $storePath") != 0) {
            die "download of `$info->{url}' failed" . ($! ? ": $!" : "") . "\n" unless $? == 0;
            next;
        }

        # The hash in the manifest can be either in base-16 or
        # base-32.  Handle both.
        $info->{narHash} =~ /^sha256:(.*)$/ or die "invalid hash";
        my $hash = $1;
        my $hash2 = hashPath("sha256", 1, $storePath);
        die "hash mismatch in downloaded path $storePath; expected $hash, got $hash2\n"
            if $hash ne $hash2;

        print STDERR "\n";

        return 1;
    }

    return 0;
}

initCache();


if ($ARGV[0] eq "--query") {

    while (<STDIN>) {
        chomp;
        my ($cmd, @args) = split " ", $_;

        if ($cmd eq "have") {
            my $storePath = <STDIN>; chomp $storePath;
            # FIXME: want to give correct info here, but it's too slow.
            print "0\n";
            #my $info = getInfo($storePath);
            #if (defined $info) { print "1\n"; } else { print "0\n"; }
        }

        elsif ($cmd eq "info") {
            printInfoParallel(@args);
            print "\n";
        }

        else { die "unknown command `$cmd'"; }

        flush STDOUT;
    }

}

elsif ($ARGV[0] eq "--substitute") {
    my $storePath = $ARGV[1] or die;
    if (!downloadBinary($storePath)) {
        print STDERR "could not download $storePath from any binary cache\n";
    }
}

else {
    die "syntax: $0 [--query | --substitute PATH]\n";
}