# hydra/tests/Setup.pm -- helper routines shared by the Hydra test suite.
package Setup;

use strict;
use warnings;
use Exporter;
use Hydra::Helper::Nix;
use Hydra::Model::DB;
use Hydra::Helper::AddBuilds;
use Cwd;

our @ISA = qw(Exporter);
our @EXPORT = qw(hydra_setup nrBuildsForJobset queuedBuildsForJobset nrQueuedBuildsForJobset createBaseJobset createJobsetWithOneInput evalSucceeds runBuild updateRepository);
2011-03-16 13:18:12 +00:00
# Seed the database with the single user the test suite expects:
# a "root" account with an empty password, so tests can act as an
# authenticated admin without any interactive setup.
sub hydra_setup {
    my ($db) = @_;
    $db->resultset('Users')->create({ username => "root", emailaddress => 'root@invalid.org', password => '' });
}
2011-03-17 11:55:39 +00:00
# Return the total number of builds recorded for the given jobset,
# finished or not.
sub nrBuildsForJobset {
    my ($jobset) = @_;
    # The original chained a no-op ->search({},{}) before counting;
    # counting the builds resultset directly is equivalent.
    return $jobset->builds->count;
}
2011-03-17 13:25:27 +00:00
# Return a resultset of the jobset's builds that have not yet finished
# (finished == 0), i.e. those still queued or running. Callers may
# iterate the resultset or count it.
sub queuedBuildsForJobset {
    my ($jobset) = @_;
    return $jobset->builds->search({finished => 0});
}
2011-03-17 11:55:39 +00:00
# Count the builds in the jobset that are still queued or running
# (delegates the finished == 0 filter to queuedBuildsForJobset).
sub nrQueuedBuildsForJobset {
    my ($jobset) = @_;
    return queuedBuildsForJobset($jobset)->count;
}
# Create (or reuse) the shared "tests" project and add a fresh jobset
# to it.
#
# Parameters:
#   $jobsetName  - name of the jobset to create
#   $nixexprpath - path of the Nix expression, relative to the "jobs" input
#
# The jobset gets a single path-type input named "jobs" that points at
# the "jobs" directory under the current working directory (tests run
# from the tests directory, where the job expressions live).
# Returns the newly created jobset row.
sub createBaseJobset {
    my ($jobsetName, $nixexprpath) = @_;

    my $db = Hydra::Model::DB->new;
    my $project = $db->resultset('Projects')->update_or_create({name => "tests", displayname => "", owner => "root"});
    my $jobset = $project->jobsets->create({name => $jobsetName, nixexprinput => "jobs", nixexprpath => $nixexprpath, emailoverride => ""});

    my $jobsetinput = $jobset->jobsetinputs->create({name => "jobs", type => "path"});
    $jobsetinput->jobsetinputalts->create({altnr => 0, value => getcwd . "/jobs"});

    return $jobset;
}
# Create a jobset via createBaseJobset and attach one additional input.
#
# Parameters:
#   $jobsetName  - name of the jobset to create
#   $nixexprpath - path of the Nix expression, relative to the "jobs" input
#   $name        - name of the extra input
#   $type        - input type (e.g. "git", "svn", "path")
#   $uri         - value/URI of the input
#
# Returns the newly created jobset row.
sub createJobsetWithOneInput {
    my ($jobsetName, $nixexprpath, $name, $type, $uri) = @_;
    my $jobset = createBaseJobset($jobsetName, $nixexprpath);

    my $jobsetinput = $jobset->jobsetinputs->create({name => $name, type => $type});
    $jobsetinput->jobsetinputalts->create({altnr => 0, value => $uri});

    return $jobset;
}
# Run hydra-evaluator on the given jobset with a 60-second timeout and
# report whether it succeeded. Any jobset error message and captured
# stdout/stderr are echoed to STDERR for test diagnostics.
# Returns true when the evaluator exited with status 0.
sub evalSucceeds {
    my ($jobset) = @_;
    my ($res, $stdout, $stderr) = captureStdoutStderr(60, ("hydra-evaluator", $jobset->project->name, $jobset->name));
    chomp $stdout; chomp $stderr;
    print STDERR "Evaluation errors for jobset ".$jobset->project->name.":".$jobset->name.": \n".$jobset->errormsg."\n" if $jobset->errormsg;
    print STDERR "STDOUT: $stdout\n" if $stdout ne "";
    print STDERR "STDERR: $stderr\n" if $stderr ne "";
    # captureStdoutStderr returns a non-zero $res on failure, so negate it.
    return !$res;
}
2011-03-17 13:25:27 +00:00
# Run hydra-build on the given build row with a 60-second timeout.
# Captured stderr is echoed for diagnostics.
# Returns true when the build command exited with status 0.
sub runBuild {
    my ($build) = @_;
    my ($res, $stdout, $stderr) = captureStdoutStderr(60, ("hydra-build", $build->id));
    print "STDERR: $stderr" if $stderr ne "";
    return !$res;
}
2012-04-15 01:17:35 +00:00
# Run an SCM update script against a test repository checkout.
#
# Parameters:
#   $scm    - path of the repository to update
#   $update - update script to execute (receives $scm as its argument)
#
# The script must print a marker of the form
#   ::<message> -- <loop> -- <status>::
# on stdout. Dies if the script exits non-zero.
# Returns a two-element list: (should-continue, was-updated), i.e.
# whether <loop> eq "continue" and whether <status> eq "updated".
sub updateRepository {
    my ($scm, $update) = @_;
    my ($res, $stdout, $stderr) = captureStdoutStderr(60, ($update, $scm));
    die "unexpected update error with $scm: $stderr\n" if $res;
    my ($message, $loop, $status) = $stdout =~ m/::(.*) -- (.*) -- (.*)::/;
    print STDOUT "Update $scm repository: $message\n";
    return ($loop eq "continue", $status eq "updated");
}
1; # Perl modules must end with a true value.