* Parallelize mirror-channel.pl by forking several worker processes to do
  the compression / uploading.

git-svn-id: https://nixos.org/repos/nix/release/trunk/channels@34738 70bd8c7a-acb8-0310-9f0d-9cc1c95dcdbb
This commit is contained in:
parent 9ff5956af0
commit b6d6feaef3
@@ -10,6 +10,8 @@ use Nix::Store;
 use File::Basename;
 use File::stat;
 use Net::Amazon::S3;
+use List::MoreUtils qw(part);
+use Forks::Super;
 
 
 if (scalar @ARGV < 4 || scalar @ARGV > 6) {
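The two new modules carry the parallelism: List::MoreUtils' part splits a list into buckets according to the index returned by its block, and Forks::Super provides a fork that can run a coderef in a child process. A minimal sketch of the round-robin partitioning the script relies on (the item names here are made up for illustration):

    use List::MoreUtils qw(part);

    # Distribute seven items round-robin over three buckets.
    my $n = 3;
    my $i = 0;
    my @buckets = part { $i++ % $n } qw(a b c d e f g);
    # @buckets is now (['a', 'd', 'g'], ['b', 'e'], ['c', 'f'])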
@@ -19,6 +21,8 @@ if (scalar @ARGV < 4 || scalar @ARGV > 6) {
 
 my $curl = "curl --location --silent --show-error --fail";
 
+my $nrProcesses = 8;
+
 my $srcChannelURL = $ARGV[0];
 my $dstChannelPath = $ARGV[1];
 my $bucketName = $ARGV[2];
@@ -98,14 +102,15 @@ sub queryPathHash16 {
 # to point to the mirror. Also fill in the size and hash fields in
 # the manifest in order to be compatible with Nix < 0.13.
 
-foreach my $storePath (permute(keys %narFiles)) {
+sub mirrorStorePath {
+    my ($storePath) = @_;
     my $nars = $narFiles{$storePath};
     die if scalar @{$nars} != 1;
     my $nar = $$nars[0];
     my $pathHash = substr(basename($storePath), 0, 32);
     my $narInfoFile = "$pathHash.narinfo";
 
-    print STDERR "checking $narInfoFile\n";
+    print STDERR "$$: checking $narInfoFile\n";
     my $get = $bucket->get_key_filename("$pathHash.narinfo", "GET");
     my $narInfo;
 
@@ -117,7 +122,7 @@ foreach my $storePath (permute(keys %narFiles)) {
         $nar->{narSize} = $narInfo->{narSize};
         $nar->{url} = "$cacheURL/$narInfo->{url}";
     } else {
-        my $dstFileTmp = "/tmp/nar";
+        my $dstFileTmp = "/tmp/nar.$$";
         my $ext;
 
         if (isValidPath($storePath) && queryPathHash16($storePath) eq $nar->{narHash}) {
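$$ is Perl's variable for the current process ID, so once the work runs in forked children, each worker gets its own temporary file and tags its log lines. For example (PID value hypothetical):

    print STDERR "$$: checking $narInfoFile\n";   # e.g. "12345: checking ..."
    my $dstFileTmp = "/tmp/nar.$$";               # e.g. "/tmp/nar.12345"

Without the .$$ suffix, parallel workers would clobber each other's /tmp/nar.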
@@ -172,6 +177,20 @@ foreach my $storePath (permute(keys %narFiles)) {
 }
 
 
+# Spawn a bunch of children to mirror paths in parallel.
+my $i = 0;
+my @filesPerProcess = part { $i++ % $nrProcesses } permute(keys %narFiles);
+for (my $n = 0; $n < $nrProcesses; $n++) {
+    my $pid = fork { sub => sub { mirrorStorePath($_) foreach @{$filesPerProcess[$n]}; } };
+}
+
+for (my $n = 0; $n < $nrProcesses; $n++) {
+    my $pid = wait;
+    die if $pid == -1;
+    die "worker process failed: $?" if $? != 0;
+}
+
+
 # Read all the old patches and propagate the useful ones. We use the
 # file "all-patches" to keep track of all patches that have been
 # generated in the past, so that patches are not lost if (for
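Forks::Super's fork accepts a { sub => ... } option that runs the coderef in the child and exits when it returns; the plain wait/$? loop in the parent then reaps the workers. Roughly equivalent logic in core Perl, as a sketch of the pattern rather than what Forks::Super actually does:

    for (my $n = 0; $n < $nrProcesses; $n++) {
        my $pid = fork();
        die "fork failed: $!" unless defined $pid;
        if ($pid == 0) {
            # Child: mirror this worker's share of the store paths, then exit.
            mirrorStorePath($_) foreach @{$filesPerProcess[$n]};
            exit 0;
        }
    }

    # Parent: reap all workers; a non-zero $? means a child
    # died on a signal or exited with a non-zero status.
    for (my $n = 0; $n < $nrProcesses; $n++) {
        my $pid = wait;
        die if $pid == -1;
        die "worker process failed: $?" if $? != 0;
    }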