Merge pull request #1152 from DeterminateSystems/parallel-tests
Parallel tests, fix a hydra-queue-runner race condition
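This change set touches three areas: flake.nix pins Test::PostgreSQL to the release-1.28-1 tag of grahamc's fork instead of the 1.27 CPAN tarball; the queue runner (State::dispatcher, State::monitorMachinesFile, State::run, and the State class declaration) gains a machinesReadyLock handshake so the dispatcher only starts dispatching once the machines file has been parsed at least once; and the Perl test harness waits for slow test fixtures and defaults yath's job count to NIX_BUILD_CORES so the suite can run in parallel.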
commit d0bc0d0eda
flake.nix (12 changed lines)

--- a/flake.nix
+++ b/flake.nix
@@ -41,10 +41,12 @@
       perlPackages = prev.perlPackages // {
         TestPostgreSQL = final.perlPackages.buildPerlModule {
           pname = "Test-PostgreSQL";
-          version = "1.27";
-          src = final.fetchurl {
-            url = "mirror://cpan/authors/id/T/TJ/TJC/Test-PostgreSQL-1.27.tar.gz";
-            sha256 = "b1bd231693100cc40905fb0ba3173173201621de9c8301f21c5b593b0a46f907";
+          version = "1.28-1";
+          src = final.fetchFromGitHub {
+            owner = "grahamc";
+            repo = "Test-postgresql";
+            rev = "release-1.28-1";
+            hash = "sha256-SFC1C3q3dbcBos18CYd/s0TIcfJW4g04ld0+XQXVToQ=";
           };
           buildInputs = with final.perlPackages; [ ModuleBuildTiny TestSharedFork pkgs.postgresql ];
           propagatedBuildInputs = with final.perlPackages; [ DBDPg DBI FileWhich FunctionParameters Moo TieHashMethod TryTiny TypeTiny ];
@@ -52,7 +54,7 @@
           makeMakerFlags = "POSTGRES_HOME=${final.postgresql}";

           meta = {
-            homepage = https://github.com/TJC/Test-postgresql;
+            homepage = "https://github.com/grahamc/Test-postgresql/releases/tag/release-1.28-1";
             description = "PostgreSQL runner for tests";
             license = with final.lib.licenses; [ artistic2 ];
           };
@@ -31,8 +31,10 @@ void State::makeRunnable(Step::ptr step)

 void State::dispatcher()
 {
+    printMsg(lvlDebug, "Waiting for the machines parsing to have completed at least once");
+    machinesReadyLock.lock();
     while (true) {
         try {
             printMsg(lvlDebug, "dispatcher woken up");
             nrDispatcherWakeups++;

@@ -158,6 +158,7 @@ void State::monitorMachinesFile()
             (settings.thisSystem == "x86_64-linux" ? "x86_64-linux,i686-linux" : settings.thisSystem.get())
             + " - " + std::to_string(settings.maxBuildJobs) + " 1 "
             + concatStringsSep(",", settings.systemFeatures.get()));
+        machinesReadyLock.unlock();
         return;
     }

@@ -203,9 +204,15 @@ void State::monitorMachinesFile()
         parseMachines(contents);
     };

+    auto firstParse = true;
+
     while (true) {
         try {
             readMachinesFiles();
+            if (firstParse) {
+                machinesReadyLock.unlock();
+                firstParse = false;
+            }
             // FIXME: use inotify.
             sleep(30);
         } catch (std::exception & e) {
@@ -321,7 +328,7 @@ int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t sto

     txn.exec_params0
         ("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
         build->id, stepNr, outputName,
         localStore->printStorePath(storePath));

     return stepNr;
@@ -770,6 +777,7 @@ void State::run(BuildID buildOne)
         dumpStatus(*conn);
     }

+    machinesReadyLock.lock();
     std::thread(&State::monitorMachinesFile, this).detach();

     std::thread(&State::queueMonitor, this).detach();
@@ -342,6 +342,7 @@ private:
     nix::Pool<Connection> dbPool;

     /* The build machines. */
+    std::mutex machinesReadyLock;
     typedef std::map<std::string, Machine::ptr> Machines;
     nix::Sync<Machines> machines; // FIXME: use atomic_shared_ptr

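The race the hunks above close: State::run() previously started the dispatcher and the machines-file monitor as independent threads, so the dispatcher could wake up before parseMachines() had ever run and try to dispatch against a not-yet-populated machine list. The fix arms machinesReadyLock in run() before the monitor thread is spawned, unlocks it in monitorMachinesFile() after the first parse, and has dispatcher() block on lock() until then. Below is a minimal, self-contained sketch of that same startup handshake; it swaps the PR's bare mutex for a condition variable plus a flag (machinesReadyMutex, machinesReadyCond, and machinesReady are names invented for the sketch), so read it as an illustration of the pattern rather than Hydra's code.

// Standalone sketch of the "dispatcher waits for the first machines parse"
// handshake. Not Hydra code: the PR uses a plain std::mutex locked in run()
// and unlocked in the monitor thread; here a condition variable plays the
// same role so lock and unlock stay within one thread.
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

struct State
{
    std::mutex machinesReadyMutex;
    std::condition_variable machinesReadyCond;
    bool machinesReady = false; // set after the machines file has been parsed once

    void parseMachines()
    {
        std::printf("monitor: parsed machines file\n");
    }

    void monitorMachinesFile()
    {
        bool firstParse = true;
        while (true) {
            parseMachines();
            if (firstParse) {
                {
                    std::lock_guard<std::mutex> lock(machinesReadyMutex);
                    machinesReady = true;
                }
                machinesReadyCond.notify_all(); // let the dispatcher proceed
                firstParse = false;
            }
            std::this_thread::sleep_for(std::chrono::seconds(30)); // re-read periodically
        }
    }

    void dispatcher()
    {
        std::printf("dispatcher: waiting for the machines parsing to have completed at least once\n");
        std::unique_lock<std::mutex> lock(machinesReadyMutex);
        machinesReadyCond.wait(lock, [this] { return machinesReady; });
        lock.unlock();
        std::printf("dispatcher: machines known, dispatching\n");
        // ... the real dispatch loop would start here ...
    }

    void run()
    {
        // Same thread structure as the PR: the monitor is detached, the
        // dispatcher must not start working before the first parse.
        std::thread(&State::monitorMachinesFile, this).detach();
        std::thread dispatchThread(&State::dispatcher, this);
        dispatchThread.join(); // sketch only: join so main() can return cleanly
    }
};

int main()
{
    State state;
    state.run();
    return 0;
}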
@@ -60,6 +60,16 @@ if (!defined($pid = fork())) {
     kill('INT', $pid);
 }

+# We expect $ctx{jobsdir}/server.py to create the file at $filename, but the time it
+# takes to do so is non-deterministic. We need to give it _some_ time to hopefully
+# settle -- but not too much that it drastically slows things down.
+for my $i (1..10) {
+    if (! -f $filename) {
+        diag("$filename does not yet exist");
+        sleep(1);
+    }
+}
+
 open(my $fh, "<", $filename) or die ("Can't open(): $!\n");
 my $i = 0;
 my $uri = <$fh>;
@@ -39,7 +39,7 @@ subtest "Building, caching, and then garbage collecting the underlying job" => s

     ok(unlink(Hydra::Helper::Nix::gcRootFor($path)), "Unlinking the GC root for underlying Dependency succeeds");

-    (my $ret, my $stdout, my $stderr) = captureStdoutStderr(1, "nix-store", "--delete", $path);
+    (my $ret, my $stdout, my $stderr) = captureStdoutStderr(5, "nix-store", "--delete", $path);
     is($ret, 0, "Deleting the underlying dependency should succeed");
 };

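The timeout passed to captureStdoutStderr for the nix-store --delete call grows from 1 to 5 seconds; presumably the one-second limit was too tight once the surrounding tests started running in parallel.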
@@ -11,6 +11,15 @@ BEGIN {
     $App::Yath::Script::SCRIPT = which 'yath';
 }
 use App::Yath::Util qw/find_yath/;
+use List::SomeUtils qw(none);

+if (defined($ENV{"NIX_BUILD_CORES"})
+    and not defined($ENV{"YATH_JOB_COUNT"})
+    and not defined($ENV{"T2_HARNESS_JOB_COUNT"})
+    and not defined($ENV{"HARNESS_JOB_COUNT"})) {
+    $ENV{"YATH_JOB_COUNT"} = $ENV{"NIX_BUILD_CORES"};
+    print STDERR "test.pl: Defaulting \$YATH_JOB_COUNT to \$NIX_BUILD_CORES (${\$ENV{'NIX_BUILD_CORES'}})\n";
+}
+
 system($^X, find_yath(), '-D', 'test', '--default-search' => './', @ARGV);
 my $exit = $?;
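With this guard in place, a test run inside a Nix build inherits NIX_BUILD_CORES and yath schedules that many test jobs concurrently, unless the caller has already set YATH_JOB_COUNT, T2_HARNESS_JOB_COUNT, or HARNESS_JOB_COUNT.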