Store aggregate members in the database

For presentation purposes, we need to know what builds are part of an
aggregate build.  So at evaluation time, look at the "members"
attribute, find the corresponding builds in the eval, and create a
mapping in the AggregateMembers table.
This commit is contained in:
Eelco Dolstra 2013-08-14 01:59:29 +02:00
parent c27f4bbaf5
commit d58142b3f0
8 changed files with 226 additions and 25 deletions

View file

@ -160,6 +160,23 @@ static void findJobsWrapped(EvalState & state, XMLWriter & doc,
} }
xmlAttrs["maintainers"] = maintainers; xmlAttrs["maintainers"] = maintainers;
/* If this is an aggregate, then get its members. */
Bindings::iterator a = v.attrs->find(state.symbols.create("_hydraAggregate"));
if (a != v.attrs->end() && state.forceBool(*a->value)) {
Bindings::iterator a = v.attrs->find(state.symbols.create("members"));
if (a == v.attrs->end())
throw EvalError("derivation must have a members attribute");
PathSet context;
state.coerceToString(*a->value, context, true, false);
PathSet drvs;
foreach (PathSet::iterator, i, context)
if (i->at(0) == '!') {
size_t index = i->find("!", 1);
drvs.insert(string(*i, index + 1));
}
xmlAttrs["members"] = concatStringsSep(" ", drvs);
}
/* Register the derivation as a GC root. !!! This /* Register the derivation as a GC root. !!! This
registers roots for jobs that we may have already registers roots for jobs that we may have already
done. */ done. */

View file

@ -578,7 +578,7 @@ sub clone_submit : Chained('buildChain') PathPart('clone/submit') Args(0) {
my %currentBuilds; my %currentBuilds;
my $newBuild = checkBuild( my $newBuild = checkBuild(
$c->model('DB'), $build->project, $build->jobset, $c->model('DB'), $build->jobset,
$inputInfo, $nixExprInput, $job, \%currentBuilds, undef, {}, $c->hydra_plugins); $inputInfo, $nixExprInput, $job, \%currentBuilds, undef, {}, $c->hydra_plugins);
error($c, "This build has already been performed.") unless $newBuild; error($c, "This build has already been performed.") unless $newBuild;

View file

@ -288,13 +288,9 @@ sub evalJobs {
my $validJob = 1; my $validJob = 1;
foreach my $arg (@{$job->{arg}}) { foreach my $arg (@{$job->{arg}}) {
my $input = $inputInfo->{$arg->{name}}->[$arg->{altnr}]; my $input = $inputInfo->{$arg->{name}}->[$arg->{altnr}];
if ($input->{type} eq "sysbuild" && $input->{system} ne $job->{system}) { $validJob = 0 if $input->{type} eq "sysbuild" && $input->{system} ne $job->{system};
$validJob = 0;
}
}
if ($validJob) {
push(@filteredJobs, $job);
} }
push(@filteredJobs, $job) if $validJob;
} }
$jobs->{job} = \@filteredJobs; $jobs->{job} = \@filteredJobs;
@ -390,7 +386,7 @@ sub getPrevJobsetEval {
# Check whether to add the build described by $buildInfo. # Check whether to add the build described by $buildInfo.
sub checkBuild { sub checkBuild {
my ($db, $project, $jobset, $inputInfo, $nixExprInput, $buildInfo, $buildIds, $prevEval, $jobOutPathMap, $plugins) = @_; my ($db, $jobset, $inputInfo, $nixExprInput, $buildInfo, $buildMap, $prevEval, $jobOutPathMap, $plugins) = @_;
my @outputNames = sort keys %{$buildInfo->{output}}; my @outputNames = sort keys %{$buildInfo->{output}};
die unless scalar @outputNames; die unless scalar @outputNames;
@ -411,9 +407,7 @@ sub checkBuild {
my $build; my $build;
txn_do($db, sub { txn_do($db, sub {
my $job = $jobset->jobs->update_or_create( my $job = $jobset->jobs->update_or_create({ name => $jobName });
{ name => $jobName
});
# Don't add a build that has already been scheduled for this # Don't add a build that has already been scheduled for this
# job, or has been built but is still a "current" build for # job, or has been built but is still a "current" build for
@ -434,19 +428,19 @@ sub checkBuild {
# semantically unnecessary (because they're implied by # semantically unnecessary (because they're implied by
# the eval), but they give a factor 1000 speedup on # the eval), but they give a factor 1000 speedup on
# the Nixpkgs jobset with PostgreSQL. # the Nixpkgs jobset with PostgreSQL.
{ project => $project->name, jobset => $jobset->name, job => $job->name, { project => $jobset->project->name, jobset => $jobset->name, job => $jobName,
name => $firstOutputName, path => $firstOutputPath }, name => $firstOutputName, path => $firstOutputPath },
{ rows => 1, columns => ['id'], join => ['buildoutputs'] }); { rows => 1, columns => ['id'], join => ['buildoutputs'] });
if (defined $prevBuild) { if (defined $prevBuild) {
print STDERR " already scheduled/built as build ", $prevBuild->id, "\n"; print STDERR " already scheduled/built as build ", $prevBuild->id, "\n";
$buildIds->{$prevBuild->id} = 0; $buildMap->{$prevBuild->id} = { new => 0, drvPath => $drvPath };
return; return;
} }
} }
# Prevent multiple builds with the same (job, outPath) from # Prevent multiple builds with the same (job, outPath) from
# being added. # being added.
my $prev = $$jobOutPathMap{$job->name . "\t" . $firstOutputPath}; my $prev = $$jobOutPathMap{$jobName . "\t" . $firstOutputPath};
if (defined $prev) { if (defined $prev) {
print STDERR " already scheduled as build ", $prev, "\n"; print STDERR " already scheduled as build ", $prev, "\n";
return; return;
@ -512,8 +506,8 @@ sub checkBuild {
$build->buildoutputs->create({ name => $_, path => $buildInfo->{output}->{$_}->{path} }) $build->buildoutputs->create({ name => $_, path => $buildInfo->{output}->{$_}->{path} })
foreach @outputNames; foreach @outputNames;
$buildIds->{$build->id} = 1; $buildMap->{$build->id} = { new => 1, drvPath => $drvPath };
$$jobOutPathMap{$job->name . "\t" . $firstOutputPath} = $build->id; $$jobOutPathMap{$jobName . "\t" . $firstOutputPath} = $build->id;
if ($build->iscachedbuild) { if ($build->iscachedbuild) {
print STDERR " marked as cached build ", $build->id, "\n"; print STDERR " marked as cached build ", $build->id, "\n";

View file

@ -0,0 +1,111 @@
use utf8;
package Hydra::Schema::AggregateMembers;
# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE
=head1 NAME
Hydra::Schema::AggregateMembers
=cut
use strict;
use warnings;
use base 'DBIx::Class::Core';
=head1 COMPONENTS LOADED
=over 4
=item * L<Hydra::Component::ToJSON>
=back
=cut
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<AggregateMembers>
=cut
__PACKAGE__->table("AggregateMembers");
=head1 ACCESSORS
=head2 aggregate
data_type: 'integer'
is_foreign_key: 1
is_nullable: 0
=head2 member
data_type: 'integer'
is_foreign_key: 1
is_nullable: 0
=cut
__PACKAGE__->add_columns(
"aggregate",
{ data_type => "integer", is_foreign_key => 1, is_nullable => 0 },
"member",
{ data_type => "integer", is_foreign_key => 1, is_nullable => 0 },
);
=head1 PRIMARY KEY
=over 4
=item * L</aggregate>
=item * L</member>
=back
=cut
__PACKAGE__->set_primary_key("aggregate", "member");
=head1 RELATIONS
=head2 aggregate
Type: belongs_to
Related object: L<Hydra::Schema::Builds>
=cut
__PACKAGE__->belongs_to(
"aggregate",
"Hydra::Schema::Builds",
{ id => "aggregate" },
{ is_deferrable => 0, on_delete => "CASCADE", on_update => "NO ACTION" },
);
=head2 member
Type: belongs_to
Related object: L<Hydra::Schema::Builds>
=cut
__PACKAGE__->belongs_to(
"member",
"Hydra::Schema::Builds",
{ id => "member" },
{ is_deferrable => 0, on_delete => "CASCADE", on_update => "NO ACTION" },
);
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-08-13 22:17:52
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:jHJtO2baXiprv0OcWCLZ+w
# You can replace this text with custom code or comments, and it will be preserved on regeneration
#
# NOTE(review): Result class for the AggregateMembers join table.  Each row
# links an aggregate build to one of its member builds; both columns are
# foreign keys into Builds with ON DELETE CASCADE, so deleting either build
# removes the mapping row automatically.  Rows are created at evaluation
# time so the UI can show which builds belong to an aggregate build.
# Everything above the md5sum marker is generated by
# DBIx::Class::Schema::Loader and must not be edited by hand — changes
# there are detected via the checksum and lost on regeneration; put any
# custom code in this preserved region instead.
1;

View file

@ -288,6 +288,36 @@ __PACKAGE__->set_primary_key("id");
=head1 RELATIONS =head1 RELATIONS
=head2 aggregatemembers_aggregates
Type: has_many
Related object: L<Hydra::Schema::AggregateMembers>
=cut
__PACKAGE__->has_many(
"aggregatemembers_aggregates",
"Hydra::Schema::AggregateMembers",
{ "foreign.aggregate" => "self.id" },
undef,
);
=head2 aggregatemembers_members
Type: has_many
Related object: L<Hydra::Schema::AggregateMembers>
=cut
__PACKAGE__->has_many(
"aggregatemembers_members",
"Hydra::Schema::AggregateMembers",
{ "foreign.member" => "self.id" },
undef,
);
=head2 buildinputs_builds =head2 buildinputs_builds
Type: has_many Type: has_many
@ -468,9 +498,29 @@ __PACKAGE__->has_many(
undef, undef,
); );
=head2 aggregates
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 Type: many_to_many
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:isCEXACY/PwkvgKHcXvAIg
Composing rels: L</aggregatemembers_members> -> aggregate
=cut
__PACKAGE__->many_to_many("aggregates", "aggregatemembers_members", "aggregate");
=head2 members
Type: many_to_many
Composing rels: L</aggregatemembers_members> -> member
=cut
__PACKAGE__->many_to_many("members", "aggregatemembers_members", "member");
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-08-13 22:17:52
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:9jqsol/evbHYjusT09hLtw
__PACKAGE__->has_many( __PACKAGE__->has_many(
"dependents", "dependents",

View file

@ -144,11 +144,11 @@ sub checkJobsetWrapped {
$jobset->builds->search({iscurrent => 1})->update({iscurrent => 0}); $jobset->builds->search({iscurrent => 1})->update({iscurrent => 0});
# Schedule each successfully evaluated job. # Schedule each successfully evaluated job.
my %buildIds; my %buildMap;
foreach my $job (permute @{$jobs->{job}}) { foreach my $job (permute @{$jobs->{job}}) {
next if $job->{jobName} eq ""; next if $job->{jobName} eq "";
print STDERR " considering job " . $project->name, ":", $jobset->name, ":", $job->{jobName} . "\n"; print STDERR " considering job " . $project->name, ":", $jobset->name, ":", $job->{jobName} . "\n";
checkBuild($db, $project, $jobset, $inputInfo, $nixExprInput, $job, \%buildIds, $prevEval, $jobOutPathMap, $plugins); checkBuild($db, $jobset, $inputInfo, $nixExprInput, $job, \%buildMap, $prevEval, $jobOutPathMap, $plugins);
} }
# Update the last checked times and error messages for each # Update the last checked times and error messages for each
@ -162,8 +162,8 @@ sub checkJobsetWrapped {
foreach $jobset->jobs->all; foreach $jobset->jobs->all;
my $hasNewBuilds = 0; my $hasNewBuilds = 0;
while (my ($id, $new) = each %buildIds) { while (my ($id, $x) = each %buildMap) {
$hasNewBuilds = 1 if $new; $hasNewBuilds = 1 if $x->{new};
} }
my $ev = $jobset->jobsetevals->create( my $ev = $jobset->jobsetevals->create(
@ -172,12 +172,29 @@ sub checkJobsetWrapped {
, checkouttime => abs($checkoutStop - $checkoutStart) , checkouttime => abs($checkoutStop - $checkoutStart)
, evaltime => abs($evalStop - $evalStart) , evaltime => abs($evalStop - $evalStart)
, hasnewbuilds => $hasNewBuilds , hasnewbuilds => $hasNewBuilds
, nrbuilds => $hasNewBuilds ? scalar(keys %buildIds) : undef , nrbuilds => $hasNewBuilds ? scalar(keys %buildMap) : undef
}); });
if ($hasNewBuilds) { if ($hasNewBuilds) {
while (my ($id, $new) = each %buildIds) { # Create JobsetEvalMembers mappings.
$ev->jobsetevalmembers->create({ build => $id, isnew => $new }); my %drvPathToId;
while (my ($id, $x) = each %buildMap) {
$ev->jobsetevalmembers->create({ build => $id, isnew => $x->{new} });
$drvPathToId{$x->{drvPath}} = $id;
}
# Create AggregateMembers mappings.
foreach my $job (@{$jobs->{job}}) {
next unless $job->{members};
my $id = $drvPathToId{$job->{drvPath}} or die;
foreach my $drvPath (split / /, $job->{members}) {
my $member = $drvPathToId{$drvPath};
if (defined $member) {
$db->resultset('AggregateMembers')->update_or_create({aggregate => $id, member => $member});
} else {
warn "aggregate job $job->{jobName} has a member $drvPath that doesn't correspond to a Hydra build\n";
}
}
} }
foreach my $name (keys %{$inputInfo}) { foreach my $name (keys %{$inputInfo}) {

View file

@ -514,6 +514,13 @@ create table NewsItems (
); );
create table AggregateMembers (
aggregate integer not null references Builds(id) on delete cascade,
member integer not null references Builds(id) on delete cascade,
primary key (aggregate, member)
);
-- Cache of the number of finished builds. -- Cache of the number of finished builds.
create table NrBuilds ( create table NrBuilds (
what text primary key not null, what text primary key not null,

5
src/sql/upgrade-19.sql Normal file
View file

@ -0,0 +1,5 @@
-- Migration: add the AggregateMembers mapping table (mirrors the
-- definition added to the main schema).  Each row records that build
-- `member` is part of aggregate build `aggregate`; both reference
-- Builds(id) and are removed automatically when either build is deleted.
-- The composite primary key prevents duplicate (aggregate, member) pairs.
create table AggregateMembers (
aggregate integer not null references Builds(id) on delete cascade,
member integer not null references Builds(id) on delete cascade,
primary key (aggregate, member)
);