- [% IF project.jobsets.size > 0 %]
+ [% IF project.jobsets %]
This project has the following jobsets:
@@ -30,18 +47,18 @@
[% IF j.get_column('nrscheduled') > 0 %]
-
+
[% ELSIF j.get_column('nrfailed') == 0 %]
-
+
[% ELSIF j.get_column('nrfailed') > 0 && j.get_column('nrsucceeded') > 0 %]
-
+
[% ELSE %]
-
+
[% END %]
[% INCLUDE renderJobsetName project=project.name jobset=j.name inRow=1 %]
[% HTML.escape(j.description) %]
- [% INCLUDE renderDateTime timestamp = j.lastcheckedtime %]
+ [% IF j.lastcheckedtime; INCLUDE renderDateTime timestamp = j.lastcheckedtime; ELSE; "-"; END %]
[% IF j.get_column('nrtotal') > 0 %]
[% successrate = ( j.get_column('nrsucceeded') / j.get_column('nrtotal') )*100 %]
[% IF j.get_column('nrscheduled') > 0 %]
@@ -78,7 +95,6 @@
-
Edit
Display name:
@@ -138,12 +154,6 @@
[% END %]
- [% IF c.user_exists %]
-
- Create a release
-
- [% END %]
-
@@ -176,4 +186,18 @@
+
+
[% END %]
diff --git a/src/root/reproduce.tt b/src/root/reproduce.tt
index d4624b39..6846ee9f 100644
--- a/src/root/reproduce.tt
+++ b/src/root/reproduce.tt
@@ -141,11 +141,14 @@ fi
args+=(--arg '[% input.name %]' "{ outPath = $inputDir; rev = \"[% input.revision %]\"; }")
[% ELSIF input.type == "string" %]
-args+=(--arg '[% input.name %]' '"[% input.value %]"')
+args+=(--arg '[% input.name %]' '"[% input.value %]"') # FIXME: escape
[% ELSIF input.type == "boolean" %]
args+=(--arg '[% input.name %]' '[% input.value %]')
+[% ELSIF input.type == "nix" %]
+args+=(--arg '[% input.name %]' '[% input.value %]') # FIXME: escape
+
[% ELSE %]
echo "$0: input ‘[% input.name %]’ has unsupported type ‘[% input.type %]’" >&2
exit 1
diff --git a/src/root/static/css/hydra.css b/src/root/static/css/hydra.css
index f3a24f18..4fac2c3c 100644
--- a/src/root/static/css/hydra.css
+++ b/src/root/static/css/hydra.css
@@ -91,3 +91,20 @@ div.news-item:not(:first-child) {
td.nowrap {
white-space: nowrap;
}
+
+.override-link a {
+ color: inherit;
+}
+
+.actions {
+ font-weight: bold;
+}
+
+.star {
+ color: black;
+ font-size: 110%;
+}
+
+.star:hover {
+ cursor: pointer;
+}
\ No newline at end of file
diff --git a/src/root/static/css/rotated-th.css b/src/root/static/css/rotated-th.css
new file mode 100644
index 00000000..8f4784b1
--- /dev/null
+++ b/src/root/static/css/rotated-th.css
@@ -0,0 +1,52 @@
+/* Rotated table headers, borrowed from http://jimmybonney.com/articles/column_header_rotation_css */
+
+.tab-content {
+ margin-right: 5em;
+ overflow: visible;
+}
+
+td.centered {
+ text-align: center;
+}
+
+.table-header-rotated th.rotate-45{
+ height: 80px;
+ width: 40px;
+ min-width: 40px;
+ max-width: 40px;
+ position: relative;
+ vertical-align: bottom;
+ padding: 0;
+ font-size: 100%;
+ line-height: 0.9;
+}
+
+.table-header-rotated th.rotate-45 > div {
+ position: relative;
+ top: 0px;
+ left: 40px; /* 80 * tan(45) / 2 = 40 where 80 is the height on the cell and 45 is the transform angle*/
+ height: 100%;
+ -ms-transform:skew(-45deg,0deg);
+ -moz-transform:skew(-45deg,0deg);
+ -webkit-transform:skew(-45deg,0deg);
+ -o-transform:skew(-45deg,0deg);
+ transform:skew(-45deg,0deg);
+ overflow: hidden;
+ border-left: 1px solid #dddddd;
+}
+
+.table-header-rotated th.rotate-45 span {
+ -ms-transform:skew(45deg,0deg) rotate(315deg);
+ -moz-transform:skew(45deg,0deg) rotate(315deg);
+ -webkit-transform:skew(45deg,0deg) rotate(315deg);
+ -o-transform:skew(45deg,0deg) rotate(315deg);
+ transform:skew(45deg,0deg) rotate(315deg);
+ position: absolute;
+ bottom: 30px; /* 40 cos(45) = 28 with an additional 2px margin*/
+ left: -25px; /*Because it looked good, but there is probably a mathematical link here as well*/
+ display: inline-block;
+  /* width: 100%; */
+ width: 85px; /* 80 / cos(45) - 40 cos (45) = 85 where 80 is the height of the cell, 40 the width of the cell and 45 the transform angle*/
+ text-align: left;
+  /* white-space: nowrap; whether to display in one line or not */
+}
diff --git a/src/root/static/images/forbidden_16.png b/src/root/static/images/forbidden_16.png
new file mode 100644
index 00000000..dde3195a
Binary files /dev/null and b/src/root/static/images/forbidden_16.png differ
diff --git a/src/root/static/images/warning_16.png b/src/root/static/images/warning_16.png
new file mode 100644
index 00000000..7a56d4eb
Binary files /dev/null and b/src/root/static/images/warning_16.png differ
diff --git a/src/root/static/js/common.js b/src/root/static/js/common.js
index 44f52462..891fd4e6 100644
--- a/src/root/static/js/common.js
+++ b/src/root/static/js/common.js
@@ -50,18 +50,98 @@ $(document).ready(function() {
$(".hydra-popover").popover({});
- /* Ensure that pressing the back button on another page
- navigates back to the previously selected tab on this
- page. */
+ $(function() {
+ if (window.location.hash) {
+ $(".nav-tabs a[href='" + window.location.hash + "']").tab('show');
+ }
+
+ /* If no tab is active, show the first one. */
+ $(".nav-tabs").each(function() {
+ if ($("li.active", this).length > 0) return;
+ $("a", $(this).children("li:not(.dropdown)").first()).tab('show');
+ });
+
+ /* Ensure that pressing the back button on another page
+ navigates back to the previously selected tab on this
+ page. */
+ $('.nav-tabs').bind('show', function(e) {
+ var pattern = /#.+/gi;
+ var id = e.target.toString().match(pattern)[0];
+ history.replaceState(null, "", id);
+ });
+ });
+
+ /* Automatically set Bootstrap radio buttons from hidden form controls. */
+ $('div[data-toggle="buttons-radio"] input[type="hidden"]').map(function(){
+ $('button[value="' + $(this).val() + '"]', $(this).parent()).addClass('active');
+ });
+
+ /* Automatically update hidden form controls from Bootstrap radio buttons. */
+ $('div[data-toggle="buttons-radio"] .btn').click(function(){
+ $('input', $(this).parent()).val($(this).val());
+ });
+
+ $(".star").click(function(event) {
+ var star = $(this);
+ var active = star.text() != '★';
+ requestJSON({
+ url: star.attr("data-post"),
+ data: active ? "star=1" : "star=0",
+ type: 'POST',
+ success: function(res) {
+ if (active) {
+ star.text('★');
+ } else {
+ star.text('☆');
+ }
+ }
+ });
+ });
+});
+
+var tabsLoaded = {};
+
+function makeLazyTab(tabName, uri) {
$('.nav-tabs').bind('show', function(e) {
var pattern = /#.+/gi;
var id = e.target.toString().match(pattern)[0];
- history.replaceState(null, "", id);
- });
-
- $(function() {
- if (window.location.hash) {
- $(".nav a[href='" + window.location.hash + "']").tab('show');
+ if (id == '#' + tabName && !tabsLoaded[id]) {
+ tabsLoaded[id] = 1;
+ $('#' + tabName).load(uri, function(response, status, xhr) {
+ if (status == "error") {
+ $('#' + tabName).html("Error loading tab: " + xhr.status + " " + xhr.statusText + "
");
+ }
+ });
}
- })
-});
+ });
+};
+
+function escapeHTML(s) {
+    return $('<div/>').text(s).html();
+};
+
+function requestJSON(args) {
+ args.dataType = 'json';
+ args.error = function(data) {
+ json = {};
+ try {
+ if (data.responseText)
+ json = $.parseJSON(data.responseText);
+ } catch (err) {
+ }
+ if (json.error)
+ bootbox.alert(escapeHTML(json.error));
+ else if (data.responseText)
+ bootbox.alert("Server error: " + escapeHTML(data.responseText));
+ else
+ bootbox.alert("Unknown server error!");
+ };
+ return $.ajax(args);
+};
+
+function redirectJSON(args) {
+ args.success = function(data) {
+ window.location = data.redirect;
+ };
+ return requestJSON(args);
+};
diff --git a/src/root/topbar.tt b/src/root/topbar.tt
index 153b0b1a..097fd0c6 100644
--- a/src/root/topbar.tt
+++ b/src/root/topbar.tt
@@ -1,19 +1,17 @@
-[% BLOCK menuItem %]
-
- uri) %]>[% title %]
-
-[% END %]
-
[% BLOCK makeSubMenu %]
[% title %]
-
[% END %]
-
[% IF c.user_exists %]
diff --git a/src/root/user.tt b/src/root/user.tt
index 718a1442..edd279a3 100644
--- a/src/root/user.tt
+++ b/src/root/user.tt
@@ -9,7 +9,7 @@
[% BREAK IF checked %]
[% END %]
[% IF checked %]
- SELECTED
+ selected="selected"
[% END %]
>[% role %]
[% END %]
@@ -22,7 +22,7 @@
[% END %]
@@ -30,7 +30,7 @@
@@ -38,14 +38,14 @@
[% END %]
@@ -54,7 +54,7 @@
-->
@@ -63,7 +63,7 @@
@@ -73,7 +73,7 @@
Roles
-
+
[% INCLUDE roleoption role="admin" %]
[% INCLUDE roleoption role="create-project" %]
@@ -91,7 +91,7 @@
[% END %]
@@ -136,8 +136,9 @@
});
[% END %]
-
-
+
+
+
diff --git a/src/root/view.tt b/src/root/view.tt
index a0d152db..e845e185 100644
--- a/src/root/view.tt
+++ b/src/root/view.tt
@@ -27,11 +27,11 @@
[% IF result.status == 0 %]
-
+
[% ELSIF result.status == 1 %]
-
+
[% ELSIF result.status == 2 %]
-
+
[% END %]
[% result.id %]
@@ -48,9 +48,9 @@
[% IF j.build %]
[% IF j.build.get_column('buildstatus') == 0 %]
-
+
[% ELSE %]
-
+
[% END %]
[% END %]
diff --git a/src/script/Makefile.am b/src/script/Makefile.am
index 8cc48484..3994684c 100644
--- a/src/script/Makefile.am
+++ b/src/script/Makefile.am
@@ -10,6 +10,7 @@ distributable_scripts = \
hydra-queue-runner \
hydra-server \
hydra-update-gc-roots \
+ hydra-s3-backup-collect-garbage \
nix-prefetch-git \
nix-prefetch-bzr \
nix-prefetch-hg
diff --git a/src/script/hydra-build b/src/script/hydra-build
index ab2650db..edd0d121 100755
--- a/src/script/hydra-build
+++ b/src/script/hydra-build
@@ -8,8 +8,10 @@ use Nix::Store;
use Hydra::Plugin;
use Hydra::Schema;
use Hydra::Helper::Nix;
+use Hydra::Helper::PluginHooks;
use Hydra::Model::DB;
use Hydra::Helper::AddBuilds;
+use Set::Scalar;
STDOUT->autoflush();
@@ -40,16 +42,18 @@ sub failDependents {
my ($drvPath, $status, $errorMsg, $dependents) = @_;
# Get the referrer closure of $drvPath.
- my @dependentDrvs = computeFSClosure(1, 0, $drvPath);
+ my $dependentDrvs = Set::Scalar->new(computeFSClosure(1, 0, $drvPath));
my $time = time();
txn_do($db, sub {
my @dependentBuilds = $db->resultset('Builds')->search(
- { drvpath => [ @dependentDrvs ], finished => 0, busy => 0 });
+ { finished => 0, busy => 0 },
+ { columns => ["id", "project", "jobset", "job", "drvpath", "finished", "busy"] });
for my $d (@dependentBuilds) {
+ next unless $dependentDrvs->has($d->drvpath);
print STDERR "failing dependent build ", $d->id, " of ", $d->project->name, ":", $d->jobset->name, ":", $d->job->name, "\n";
$d->update(
{ finished => 1
@@ -67,8 +71,8 @@ sub failDependents {
, drvpath => $drvPath
, busy => 0
, status => $status
- , starttime => time
- , stoptime => time
+ , starttime => $time
+ , stoptime => $time
, errormsg => $errorMsg
});
addBuildStepOutputs($step);
@@ -80,19 +84,6 @@ sub failDependents {
}
-sub notify {
- my ($build, $dependents) = @_;
- foreach my $plugin (@plugins) {
- eval {
- $plugin->buildFinished($build, $dependents);
- };
- if ($@) {
- print STDERR "$plugin->buildFinished: $@\n";
- }
- }
-}
-
-
sub doBuild {
my ($build) = @_;
@@ -132,7 +123,9 @@ sub doBuild {
# associated log files, etc.
my $cmd = "nix-store --realise $drvPath " .
"--timeout $timeout " .
- "--max-silent-time $maxsilent --keep-going --fallback " .
+ "--max-silent-time $maxsilent " .
+ "--option build-max-log-size 67108864 " .
+ "--keep-going --fallback " .
"--no-build-output --log-type flat --print-build-trace " .
"--add-root " . gcRootFor($outputs{out} // $outputs{(sort keys %outputs)[0]}) . " 2>&1";
@@ -149,6 +142,22 @@ sub doBuild {
next;
}
+ # Hack to handle timeouts, which Nix doesn't report
+ # properly when they occur remotely. If we get a "hook
+ # failed" error and $maxsilent seconds have passed since
+ # the start of the build step, then assume that a timeout
+            # occurred.
+ if (/^@\s+hook-failed\s+(\S+)\s+(\S+)\s+(\S+)\s+(.*)$/ && $3 eq "256") {
+ my $drvPathStep = $1;
+ if ($buildSteps{$drvPathStep}) {
+ my $step = $build->buildsteps->find({stepnr => $buildSteps{$drvPathStep}}) or die;
+ print STDERR $step->starttime, " ", time(), "\n";
+ if ($step->starttime + $maxsilent <= time) {
+ $_ = "@ build-failed $1 $2 timeout $4";
+ }
+ }
+ }
+
if (/^@\s+build-started\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)$/) {
my $drvPathStep = $1;
txn_do($db, sub {
@@ -319,7 +328,7 @@ sub doBuild {
});
- notify($build, $dependents);
+ notifyBuildFinished(\@plugins, $build, $dependents);
}
@@ -328,7 +337,7 @@ print STDERR "performing build $buildId\n";
if ($ENV{'HYDRA_MAIL_TEST'}) {
my $build = $db->resultset('Builds')->find($buildId);
- notify($build, []);
+ notifyBuildFinished(\@plugins, $build, []);
exit 0;
}
@@ -345,8 +354,8 @@ txn_do($db, sub {
die "build $buildId is already being built";
}
$build->update({busy => 1, locker => $$});
- $build->buildsteps->search({busy => 1})->delete_all;
- $build->buildproducts->delete_all;
+ $build->buildsteps->search({busy => 1})->delete;
+ $build->buildproducts->delete;
});
die unless $build;
diff --git a/src/script/hydra-evaluator b/src/script/hydra-evaluator
index 50e0e448..a63a27f6 100755
--- a/src/script/hydra-evaluator
+++ b/src/script/hydra-evaluator
@@ -2,6 +2,7 @@
use strict;
use feature 'switch';
+use utf8;
use Hydra::Schema;
use Hydra::Plugin;
use Hydra::Helper::Nix;
@@ -33,7 +34,7 @@ sub fetchInputs {
foreach my $input ($jobset->jobsetinputs->all) {
foreach my $alt ($input->jobsetinputalts->all) {
push @{$$inputInfo{$input->name}}, $_
- foreach fetchInput($plugins, $db, $project, $jobset, $input->name, $input->type, $alt->value);
+ foreach fetchInput($plugins, $db, $project, $jobset, $input->name, $input->type, $alt->value, $input->emailresponsible);
}
}
}
@@ -41,12 +42,16 @@ sub fetchInputs {
sub setJobsetError {
my ($jobset, $errorMsg) = @_;
+ my $prevError = $jobset->errormsg;
+
eval {
txn_do($db, sub {
- $jobset->update({errormsg => $errorMsg, errortime => time});
+ $jobset->update({ errormsg => $errorMsg, errortime => time, fetcherrormsg => undef });
});
};
- sendJobsetErrorNotification($jobset, $errorMsg);
+ if (defined $errorMsg && $errorMsg ne ($prevError // "")) {
+ sendJobsetErrorNotification($jobset, $errorMsg);
+ }
}
@@ -65,7 +70,7 @@ sub sendJobsetErrorNotification() {
my $body = "Hi,\n"
. "\n"
- . "This is to let you know that Hydra jobset evalation of $projectName:$jobsetName "
+ . "This is to let you know that Hydra jobset evaluation of $projectName:$jobsetName "
. "resulted in the following error:\n"
. "\n"
. "$errorMsg"
@@ -110,7 +115,17 @@ sub checkJobsetWrapped {
# Fetch all values for all inputs.
my $checkoutStart = time;
- fetchInputs($project, $jobset, $inputInfo);
+ eval {
+ fetchInputs($project, $jobset, $inputInfo);
+ };
+ if ($@) {
+ my $msg = $@;
+ print STDERR $msg;
+ txn_do($db, sub {
+ $jobset->update({ lastcheckedtime => time, fetcherrormsg => $msg });
+ });
+ return;
+ }
my $checkoutStop = time;
# Hash the arguments to hydra-eval-jobs and check the
@@ -122,14 +137,14 @@ sub checkJobsetWrapped {
if (defined $prevEval && $prevEval->hash eq $argsHash) {
print STDERR " jobset is unchanged, skipping\n";
txn_do($db, sub {
- $jobset->update({lastcheckedtime => time});
+ $jobset->update({ lastcheckedtime => time, fetcherrormsg => undef });
});
return;
}
# Evaluate the job expression.
my $evalStart = time;
- my ($jobs, $nixExprInput) = evalJobs($inputInfo, $exprType, $jobset->nixexprinput, $jobset->nixexprpath);
+ my ($jobs, $nixExprInput, $msg) = evalJobs($inputInfo, $exprType, $jobset->nixexprinput, $jobset->nixexprpath);
my $evalStop = time;
my $jobOutPathMap = {};
@@ -144,11 +159,11 @@ sub checkJobsetWrapped {
$jobset->builds->search({iscurrent => 1})->update({iscurrent => 0});
# Schedule each successfully evaluated job.
- my %buildIds;
+ my %buildMap;
foreach my $job (permute @{$jobs->{job}}) {
next if $job->{jobName} eq "";
print STDERR " considering job " . $project->name, ":", $jobset->name, ":", $job->{jobName} . "\n";
- checkBuild($db, $project, $jobset, $inputInfo, $nixExprInput, $job, \%buildIds, $prevEval, $jobOutPathMap);
+ checkBuild($db, $jobset, $inputInfo, $nixExprInput, $job, \%buildMap, $prevEval, $jobOutPathMap, $plugins);
}
# Update the last checked times and error messages for each
@@ -161,23 +176,51 @@ sub checkJobsetWrapped {
$_->update({ errormsg => $failedJobNames{$_->name} ? join '\n', @{$failedJobNames{$_->name}} : undef })
foreach $jobset->jobs->all;
- my $hasNewBuilds = 0;
- while (my ($id, $new) = each %buildIds) {
- $hasNewBuilds = 1 if $new;
- }
+ # Have any builds been added or removed since last time?
+ my $jobsetChanged =
+ (scalar(grep { $_->{new} } values(%buildMap)) > 0)
+ || (defined $prevEval && $prevEval->jobsetevalmembers->count != scalar(keys %buildMap));
my $ev = $jobset->jobsetevals->create(
{ hash => $argsHash
, timestamp => time
, checkouttime => abs($checkoutStop - $checkoutStart)
, evaltime => abs($evalStop - $evalStart)
- , hasnewbuilds => $hasNewBuilds
- , nrbuilds => $hasNewBuilds ? scalar(keys %buildIds) : undef
+ , hasnewbuilds => $jobsetChanged ? 1 : 0
+ , nrbuilds => $jobsetChanged ? scalar(keys %buildMap) : undef
});
- if ($hasNewBuilds) {
- while (my ($id, $new) = each %buildIds) {
- $ev->jobsetevalmembers->create({ build => $id, isnew => $new });
+ if ($jobsetChanged) {
+ # Create JobsetEvalMembers mappings.
+ while (my ($id, $x) = each %buildMap) {
+ $ev->jobsetevalmembers->create({ build => $id, isnew => $x->{new} });
+ }
+
+ # Create AggregateConstituents mappings. Since there can
+ # be jobs that alias each other, if there are multiple
+ # builds for the same derivation, pick the one with the
+ # shortest name.
+ my %drvPathToId;
+ while (my ($id, $x) = each %buildMap) {
+ my $y = $drvPathToId{$x->{drvPath}};
+ if (defined $y) {
+ next if length $x->{jobName} > length $y->{jobName};
+ next if length $x->{jobName} == length $y->{jobName} && $x->{jobName} ge $y->{jobName};
+ }
+ $drvPathToId{$x->{drvPath}} = $x;
+ }
+
+ foreach my $job (@{$jobs->{job}}) {
+ next unless $job->{constituents};
+ my $x = $drvPathToId{$job->{drvPath}} or die;
+ foreach my $drvPath (split / /, $job->{constituents}) {
+ my $constituent = $drvPathToId{$drvPath};
+ if (defined $constituent) {
+ $db->resultset('AggregateConstituents')->update_or_create({aggregate => $x->{id}, constituent => $constituent->{id}});
+ } else {
+ warn "aggregate job ‘$job->{jobName}’ has a constituent ‘$drvPath’ that doesn't correspond to a Hydra build\n";
+ }
+ }
}
foreach my $name (keys %{$inputInfo}) {
@@ -203,23 +246,16 @@ sub checkJobsetWrapped {
print STDERR " created cached eval ", $ev->id, "\n";
$prevEval->builds->update({iscurrent => 1}) if defined $prevEval;
}
+
+ # If this is a one-shot jobset, disable it now.
+ $jobset->update({ enabled => 0 }) if $jobset->enabled == 2;
});
# Store the error messages for jobs that failed to evaluate.
- my $msg = "";
foreach my $error (@{$jobs->{error}}) {
- my $bindings = "";
- foreach my $arg (@{$error->{arg}}) {
- my $input = $inputInfo->{$arg->{name}}->[$arg->{altnr}] or die "invalid input";
- $bindings .= ", " if $bindings ne "";
- $bindings .= $arg->{name} . " = ";
- given ($input->{type}) {
- when ("string") { $bindings .= "\"" . $input->{value} . "\""; }
- when ("boolean") { $bindings .= $input->{value}; }
- default { $bindings .= "..."; }
- }
- }
- $msg .= "at `" . $error->{location} . "' [$bindings]:\n" . $error->{msg} . "\n\n";
+ $msg .=
+ ($error->{location} ne "" ? "in job ‘$error->{location}’" : "at top-level") .
+ ":\n" . $error->{msg} . "\n\n";
}
setJobsetError($jobset, $msg);
}
@@ -241,7 +277,7 @@ sub checkJobset {
if ($@) {
my $msg = $@;
- print STDERR "error evaluating jobset ", $jobset->name, ": $msg";
+ print STDERR $msg;
txn_do($db, sub {
$jobset->update({lastcheckedtime => time});
setJobsetError($jobset, $msg);
@@ -272,7 +308,7 @@ sub checkSomeJobset {
# longest time (but don't check more often than the jobset's
# minimal check interval).
($jobset) = $db->resultset('Jobsets')->search(
- { 'project.enabled' => 1, 'me.enabled' => 1,
+ { 'project.enabled' => 1, 'me.enabled' => { '!=' => 0 },
, 'checkinterval' => { '!=', 0 }
, -or => [ 'lastcheckedtime' => undef, 'lastcheckedtime' => { '<', \ (time() . " - me.checkinterval") } ] },
{ join => 'project', order_by => [ 'lastcheckedtime nulls first' ], rows => 1 })
@@ -280,13 +316,10 @@ sub checkSomeJobset {
return 0 unless defined $jobset;
- checkJobset($jobset);
-
- return 1;
+ return system($0, $jobset->project->name, $jobset->name) == 0;
}
-# For testing: evaluate a single jobset, then exit.
if (scalar @ARGV == 2) {
my $projectName = $ARGV[0];
my $jobsetName = $ARGV[1];
diff --git a/src/script/hydra-init b/src/script/hydra-init
index 25abde15..33c60408 100755
--- a/src/script/hydra-init
+++ b/src/script/hydra-init
@@ -51,12 +51,12 @@ for (my $n = $schemaVersion; $n < $maxSchemaVersion; $n++) {
my @statements = $sql_splitter->split($schema);
eval {
$dbh->begin_work;
- sub run {
+ sub run_ {
my ($stm) = @_;
print STDERR "executing SQL statement: $stm\n";
$dbh->do($_);
}
- run($_) foreach @statements;
+ run_($_) foreach @statements;
$db->resultset('SchemaVersion')->update({version => $m});
$dbh->commit;
};
diff --git a/src/script/hydra-queue-runner b/src/script/hydra-queue-runner
index 9443c8b2..f67d8cc2 100755
--- a/src/script/hydra-queue-runner
+++ b/src/script/hydra-queue-runner
@@ -9,6 +9,7 @@ use Hydra::Helper::Nix;
use Hydra::Model::DB;
use IO::Handle;
use Nix::Store;
+use Set::Scalar;
chdir Hydra::Model::DB::getHydraPath or die;
my $db = Hydra::Model::DB->new();
@@ -36,7 +37,7 @@ sub unlockDeadBuilds {
}
if ($unlock) {
print "build ", $build->id, " pid $pid died, unlocking\n";
- $build->update({ busy => 0, locker => ""});
+ $build->update({ busy => 0, locker => "" });
$build->buildsteps->search({ busy => 1 })->update({ busy => 0, status => 4, stoptime => time });
}
}
@@ -52,14 +53,25 @@ sub findBuildDependencyInQueue {
my @deps = grep { /\.drv$/ && $_ ne $build->drvpath } computeFSClosure(0, 0, $build->drvpath);
return unless scalar @deps > 0;
foreach my $d (@deps) {
- my $b = $buildsByDrv->{$d};
- next unless defined $b;
- return $db->resultset('Builds')->find($b);
+ my $bs = $buildsByDrv->{$d};
+ next unless defined $bs;
+ return $db->resultset('Builds')->find((@$bs)[0]);
}
return undef;
}
+sub blockBuilds {
+ my ($buildsByDrv, $blockedBuilds, $build) = @_;
+ my @rdeps = grep { /\.drv$/ && $_ ne $build->drvpath } computeFSClosure(1, 0, $build->drvpath);
+ foreach my $drv (@rdeps) {
+ my $bs = $buildsByDrv->{$drv};
+ next if !defined $bs;
+ $blockedBuilds->insert($_) foreach @$bs;
+ }
+}
+
+
sub checkBuilds {
# print "looking for runnable builds...\n";
@@ -70,27 +82,34 @@ sub checkBuilds {
my %maxConcurrent;
foreach my $machineName (keys %{$machines}) {
- foreach my $system (${$machines}{$machineName}{'systemTypes'}) {
+ foreach my $system (@{${$machines}{$machineName}{'systemTypes'}}) {
$maxConcurrent{$system} = (${$machines}{$machineName}{'maxJobs'} or 0) + ($maxConcurrent{$system} or 0)
}
}
txn_do($db, sub {
- # Cache scheduled by derivation path to speed up
+ # Cache scheduled builds by derivation path to speed up
# findBuildDependencyInQueue.
my $buildsByDrv = {};
- $buildsByDrv->{$_->drvpath} = $_->id
- foreach $db->resultset('Builds')->search({ finished => 0, enabled => 1 }, { join => ['project'] });
+ push @{$buildsByDrv->{$_->drvpath}}, $_->id
+ foreach $db->resultset('Builds')->search({ finished => 0 });
+
+ # Builds in the queue of which a dependency is already building.
+ my $blockedBuilds = Set::Scalar->new();
+ blockBuilds($buildsByDrv, $blockedBuilds, $_)
+ foreach $db->resultset('Builds')->search({ finished => 0, busy => 1 });
# Get the system types for the runnable builds.
my @systemTypes = $db->resultset('Builds')->search(
- { finished => 0, busy => 0, enabled => 1 },
+ { finished => 0, busy => 0 },
{ join => ['project'], select => ['system'], as => ['system'], distinct => 1 });
+ # Get the total number of scheduling shares.
+ my $totalShares = getTotalShares($db);
+
# For each system type, select up to the maximum number of
- # concurrent build for that system type. Choose the highest
- # priority builds first, then the oldest builds.
+ # concurrent build for that system type.
foreach my $system (@systemTypes) {
# How many builds are already currently executing for this
# system type?
@@ -101,51 +120,111 @@ sub checkBuilds {
my $max = defined $systemTypeInfo ? $systemTypeInfo->maxconcurrent : $maxConcurrent{$system->system} // 2;
my $extraAllowed = $max - $nrActive;
- $extraAllowed = 0 if $extraAllowed < 0;
+ next if $extraAllowed <= 0;
- # Select the highest-priority builds to start.
- my @builds = $extraAllowed == 0 ? () : $db->resultset('Builds')->search(
- { finished => 0, busy => 0, system => $system->system, enabled => 1 },
- { join => ['project'], order_by => ["priority DESC", "id"] });
+ print STDERR "starting at most $extraAllowed builds for system ${\$system->system}\n";
- my $started = 0;
- foreach my $build (@builds) {
- # Find a dependency of $build that has no queued
- # dependencies itself. This isn't strictly necessary,
- # but it ensures that Nix builds are done as part of
- # their corresponding Hydra builds, rather than as a
- # dependency of some other Hydra build.
- while (my $dep = findBuildDependencyInQueue($buildsByDrv, $build)) {
- $build = $dep;
+ my $timeSpentPerJobset;
+
+ j: while ($extraAllowed-- > 0) {
+
+ my @runnableJobsets = $db->resultset('Builds')->search(
+ { finished => 0, busy => 0, system => $system->system },
+ { select => ['project', 'jobset'], distinct => 1 });
+
+ next if @runnableJobsets == 0;
+
+ my $windowSize = 24 * 3600;
+ my $costPerBuild = 30;
+ my $totalWindowSize = $windowSize * $max;
+
+ my @res;
+
+ foreach my $b (@runnableJobsets) {
+ my $jobset = $db->resultset('Jobsets')->find($b->get_column('project'), $b->get_column('jobset')) or die;
+
+ my $timeSpent = $timeSpentPerJobset->{$b->get_column('project')}->{$b->get_column('jobset')};
+
+ if (!defined $timeSpent) {
+ $timeSpent = $jobset->builds->search(
+ { },
+ { where => \ ("(finished = 0 or (me.stoptime >= " . (time() - $windowSize) . "))")
+ , join => 'buildsteps'
+ , select => \ "sum(coalesce(buildsteps.stoptime, ${\time}) - buildsteps.starttime)"
+ , as => "sum" })->single->get_column("sum") // 0;
+
+ # Add a 30s penalty for each started build. This
+ # is to account for jobsets that have running
+ # builds but no build steps yet.
+ $timeSpent += $jobset->builds->search({ finished => 0, busy => 1 })->count * $costPerBuild;
+
+ $timeSpentPerJobset->{$b->get_column('project')}->{$b->get_column('jobset')} = $timeSpent;
+ }
+
+ my $share = $jobset->schedulingshares || 1; # prevent division by zero
+ my $used = $timeSpent / ($totalWindowSize * ($share / $totalShares));
+
+ #printf STDERR "%s:%s: %d s, total used = %.2f%%, share used = %.2f%%\n", $jobset->get_column('project'), $jobset->name, $timeSpent, $timeSpent / $totalWindowSize * 100, $used * 100;
+
+ push @res, { jobset => $jobset, used => $used };
}
- next if $build->busy;
- my $logfile = getcwd . "/logs/" . $build->id;
- mkdir(dirname $logfile);
- unlink($logfile);
- $build->update(
- { busy => 1
- , locker => $$
- , logfile => $logfile
- , starttime => time()
- });
- push @buildsStarted, $build;
+ foreach my $r (sort { $a->{used} <=> $b->{used} } @res) {
+ my $jobset = $r->{jobset};
+ #print STDERR "selected ", $jobset->get_column('project'), ':', $jobset->name, "\n";
- last if ++$started >= $extraAllowed;
- }
+ # Select the highest-priority build for this jobset.
+ my @builds = $jobset->builds->search(
+ { finished => 0, busy => 0, system => $system->system },
+ { order_by => ["priority DESC", "id"] });
- if ($started > 0) {
- print STDERR "system type `", $system->system,
- "': $nrActive active, $max allowed, started $started builds\n";
+ foreach my $build (@builds) {
+ next if $blockedBuilds->has($build->id);
+
+ # Find a dependency of $build that has no queued
+ # dependencies itself. This isn't strictly necessary,
+ # but it ensures that Nix builds are done as part of
+ # their corresponding Hydra builds, rather than as a
+ # dependency of some other Hydra build.
+ while (my $dep = findBuildDependencyInQueue($buildsByDrv, $build)) {
+ $build = $dep;
+ }
+ next if $build->busy;
+
+ printf STDERR "starting build %d (%s:%s:%s) on %s; jobset at %.2f%% of its share\n",
+ $build->id, $build->project->name, $build->jobset->name, $build->job->name, $build->system, $r->{used} * 100;
+
+ my $logfile = getcwd . "/logs/" . $build->id;
+ mkdir(dirname $logfile);
+ unlink($logfile);
+ $build->update(
+ { busy => 1
+ , locker => $$
+ , logfile => $logfile
+ });
+ push @buildsStarted, $build;
+
+ $timeSpentPerJobset->{$jobset->get_column('project')}->{$jobset->name} += $costPerBuild;
+
+ blockBuilds($buildsByDrv, $blockedBuilds, $build);
+
+ next j;
+ }
+ }
+
+ last; # nothing found, give up on this system type
}
}
+
+ $lastTime = time();
+
+ $_->update({ starttime => time() }) foreach @buildsStarted;
});
# Actually start the builds we just selected. We need to do this
# outside the transaction in case it aborts or something.
foreach my $build (@buildsStarted) {
my $id = $build->id;
- print "starting build $id (", $build->project->name, ":", $build->jobset->name, ':', $build->job->name, ") on ", $build->system, "\n";
eval {
my $logfile = $build->logfile;
my $child = fork();
@@ -164,9 +243,7 @@ sub checkBuilds {
if ($@) {
warn $@;
txn_do($db, sub {
- $build->busy(0);
- $build->locker($$);
- $build->update;
+ $build->update({ busy => 0, locker => $$ });
});
}
}
@@ -187,8 +264,6 @@ while (1) {
unlockDeadBuilds;
checkBuilds;
-
- $lastTime = time();
};
warn $@ if $@;
diff --git a/src/script/hydra-s3-backup-collect-garbage b/src/script/hydra-s3-backup-collect-garbage
new file mode 100755
index 00000000..9de97be8
--- /dev/null
+++ b/src/script/hydra-s3-backup-collect-garbage
@@ -0,0 +1,58 @@
+#! /var/run/current-system/sw/bin/perl -w
+
+use strict;
+use File::Basename;
+use Fcntl;
+use IO::File;
+use Net::Amazon::S3;
+use Net::Amazon::S3::Client;
+use Nix::Config;
+use Nix::Store;
+use Hydra::Model::DB;
+use Hydra::Helper::Nix;
+
+my $cfg = getHydraConfig()->{s3backup};
+my @config = defined $cfg ? ref $cfg eq "ARRAY" ? @$cfg : ($cfg) : ();
+
+exit 0 unless @config;
+
+my $lockfile = Hydra::Model::DB::getHydraPath . "/.hydra-s3backup.lock";
+my $lockhandle = IO::File->new;
+open($lockhandle, ">", $lockfile) or die "Opening $lockfile: $!";
+flock($lockhandle, Fcntl::LOCK_EX) or die "Write-locking $lockfile: $!";
+
+my $client = Net::Amazon::S3::Client->new( s3 => Net::Amazon::S3->new( retry => 1 ) );
+my $db = Hydra::Model::DB->new();
+
+my $gcRootsDir = getGCRootsDir;
+opendir DIR, $gcRootsDir or die;
+my @roots = readdir DIR;
+closedir DIR;
+
+my @actual_roots = ();
+foreach my $link (@roots) {
+ next if $link eq "." || $link eq "..";
+ push @actual_roots, readlink "$gcRootsDir/$link";
+}
+
+# Don't delete a nix-cache-info file, if present
+my %closure = ( "nix-cache-info" => undef );
+foreach my $path (computeFSClosure(0, 0, @actual_roots)) {
+ my $hash = substr basename($path), 0, 32;
+ $closure{"$hash.narinfo"} = undef;
+ $closure{"$hash.nar"} = undef;
+}
+
+foreach my $bucket_config (@config) {
+ my $bucket = $client->bucket( name => $bucket_config->{name} );
+ my $prefix = exists $bucket_config->{prefix} ? $bucket_config->{prefix} : "";
+
+ my $cache_stream = $bucket->list({ prefix => $prefix });
+ until ($cache_stream->is_done) {
+ foreach my $object ($cache_stream->items) {
+ $object->delete unless exists $closure{basename($object->key)};
+ }
+ }
+}
+
+1;
diff --git a/src/script/hydra-update-gc-roots b/src/script/hydra-update-gc-roots
index 43b17a4b..1efbc69b 100755
--- a/src/script/hydra-update-gc-roots
+++ b/src/script/hydra-update-gc-roots
@@ -56,6 +56,11 @@ my @roots = readdir DIR;
closedir DIR;
+# For scheduled builds, we register the derivation as a GC root.
+print STDERR "*** looking for scheduled builds\n";
+keepBuild $_ foreach $db->resultset('Builds')->search({ finished => 0 }, { columns => [ @columns ] });
+
+
# Keep every build in every release of every project.
print STDERR "*** looking for release members\n";
keepBuild $_ foreach $db->resultset('Builds')->search_literal(
@@ -84,50 +89,33 @@ foreach my $project ($db->resultset('Projects')->search({}, { order_by => ["name
next;
}
- if ($keepnr <= 0 ) {
- print STDERR "*** jobset ", $project->name, ":", $jobset->name, " set to keep 0 builds\n";
- next;
- }
-
- # FIXME: base this on jobset evals?
- print STDERR "*** looking for the $keepnr most recent successful builds of each job in jobset ",
+ print STDERR "*** looking for all builds in the unfinished and $keepnr most recent finished evaluations of jobset ",
$project->name, ":", $jobset->name, "\n";
- keepBuild $_ foreach $jobset->builds->search(
- { 'me.id' => { 'in' => \
- [ "select b2.id from Builds b2 join " .
- " (select distinct job, system, coalesce( " .
- " (select id from builds where project = b.project and jobset = b.jobset and job = b.job and system = b.system and finished = 1 and buildStatus = 0 order by id desc offset ? limit 1)" .
- " , 0) as nth from builds b where project = ? and jobset = ? and isCurrent = 1) x " .
- " on b2.project = ? and b2.jobset = ? and b2.job = x.job and b2.system = x.system and (id >= x.nth) where finished = 1 and buildStatus = 0"
- , [ '', $keepnr - 1 ], [ '', $project->name ], [ '', $jobset->name ], [ '', $project->name ], [ '', $jobset->name ] ] }
- },
- { order_by => ["job", "system", "id"], columns => [ @columns ] });
- }
+ my @evals;
- # Go over all views in this project.
- foreach my $view ($project->views->all) {
- print STDERR "*** looking for builds to keep in view ", $project->name, ":", $view->name, "\n";
+ # Get the unfinished evals.
+ push @evals, $_->get_column("eval") foreach $jobset->builds->search(
+ { finished => 0 },
+ { join => "jobsetevalmembers", select => "jobsetevalmembers.eval", as => "eval", distinct => 1 });
- (my $primaryJob) = $view->viewjobs->search({isprimary => 1});
- my $jobs = [$view->viewjobs->all];
-
- # Keep all builds belonging to the most recent successful view result.
- my $latest = getLatestSuccessfulViewResult($project, $primaryJob, $jobs, 0);
- if (defined $latest) {
- print STDERR " keeping latest successful view result ", $latest->id, " (", $latest->get_column('releasename'), ")\n";
- my $result = getViewResult($latest, $jobs);
- keepBuild $_->{build} foreach @{$result->{jobs}};
+ # Get the N most recent finished evals.
+ if ($keepnr) {
+ push @evals, $_->get_column("id") foreach $jobset->jobsetevals->search(
+ { hasNewBuilds => 1 },
+ { where => \ "not exists (select 1 from builds b join jobsetevalmembers m on b.id = m.build where m.eval = me.id and b.finished = 0)"
+ , order_by => "id desc", rows => $keepnr });
}
+
+ keepBuild $_ foreach $jobset->builds->search(
+ { finished => 1, buildStatus => { -in => [0, 6] }
+ , id => { -in => $db->resultset('JobsetEvalMembers')->search({ eval => { -in => [@evals] } }, { select => "build" })->as_query }
+ },
+ { order_by => ["job", "id"], columns => [ @columns ] });
}
}
-# For scheduled builds, we register the derivation as a GC root.
-print STDERR "*** looking for scheduled builds\n";
-keepBuild $_ foreach $db->resultset('Builds')->search({ finished => 0 }, { columns => [ @columns ] });
-
-
# Remove existing roots that are no longer wanted.
print STDERR "*** removing unneeded GC roots\n";
diff --git a/src/sql/hydra.sql b/src/sql/hydra.sql
index c8108d73..8e5f3093 100644
--- a/src/sql/hydra.sql
+++ b/src/sql/hydra.sql
@@ -55,12 +55,14 @@ create table Jobsets (
errorTime integer, -- timestamp associated with errorMsg
lastCheckedTime integer, -- last time the evaluator looked at this jobset
triggerTime integer, -- set if we were triggered by a push event
- enabled integer not null default 1,
+ enabled integer not null default 1, -- 0 = disabled, 1 = enabled, 2 = one-shot
enableEmail integer not null default 1,
hidden integer not null default 0,
emailOverride text not null,
keepnr integer not null default 3,
checkInterval integer not null default 300, -- minimum time in seconds between polls (0 = disable polling)
+ schedulingShares integer not null default 100,
+ fetchErrorMsg text,
primary key (project, name),
foreign key (project) references Projects(name) on delete cascade on update cascade
#ifdef SQLITE
@@ -74,7 +76,8 @@ create table JobsetInputs (
project text not null,
jobset text not null,
name text not null,
- type text not null, -- "svn", "path", "uri", "string", "boolean"
+ type text not null, -- "svn", "path", "uri", "string", "boolean", "nix"
+ emailResponsible integer not null default 0, -- whether to email committers to this input who change a build
primary key (project, jobset, name),
foreign key (project, jobset) references Jobsets(project, name) on delete cascade on update cascade
);
@@ -140,7 +143,7 @@ create table Builds (
isCurrent integer default 0,
-- Copy of the nixExprInput/nixExprPath fields of the jobset that
- -- instantiated this build. Needed if we want to clone this
+ -- instantiated this build. Needed if we want to reproduce this
-- build.
nixExprInput text,
nixExprPath text,
@@ -255,6 +258,7 @@ create table BuildInputs (
uri text,
revision text,
value text,
+ emailResponsible integer not null default 0,
dependency integer, -- build ID of the input, for type == 'build'
path text,
@@ -322,6 +326,15 @@ create table CachedGitInputs (
primary key (uri, branch, revision)
);
+create table CachedDarcsInputs (
+ uri text not null,
+ revision text not null,
+ sha256hash text not null,
+ storePath text not null,
+ revCount integer not null,
+ primary key (uri, revision)
+);
+
create table CachedHgInputs (
uri text not null,
branch text not null,
@@ -514,6 +527,56 @@ create table NewsItems (
);
+create table AggregateConstituents (
+ aggregate integer not null references Builds(id) on delete cascade,
+ constituent integer not null references Builds(id) on delete cascade,
+ primary key (aggregate, constituent)
+);
+
+
+create table StarredJobs (
+ userName text not null,
+ project text not null,
+ jobset text not null,
+ job text not null,
+ primary key (userName, project, jobset, job),
+ foreign key (userName) references Users(userName) on update cascade on delete cascade,
+ foreign key (project) references Projects(name) on update cascade on delete cascade,
+ foreign key (project, jobset) references Jobsets(project, name) on update cascade on delete cascade,
+ foreign key (project, jobset, job) references Jobs(project, jobset, name) on update cascade on delete cascade
+);
+
+
+-- Cache of the number of finished builds.
+create table NrBuilds (
+ what text primary key not null,
+ count integer not null
+);
+
+insert into NrBuilds(what, count) values('finished', 0);
+
+#ifdef POSTGRESQL
+
+create function modifyNrBuildsFinished() returns trigger as $$
+ begin
+ if ((tg_op = 'INSERT' and new.finished = 1) or
+ (tg_op = 'UPDATE' and old.finished = 0 and new.finished = 1)) then
+ update NrBuilds set count = count + 1 where what = 'finished';
+ elsif ((tg_op = 'DELETE' and old.finished = 1) or
+ (tg_op = 'UPDATE' and old.finished = 1 and new.finished = 0)) then
+ update NrBuilds set count = count - 1 where what = 'finished';
+ end if;
+ return null;
+ end;
+$$ language plpgsql;
+
+create trigger NrBuildsFinished after insert or update or delete on Builds
+ for each row
+ execute procedure modifyNrBuildsFinished();
+
+#endif
+
+
-- Some indices.
create index IndexBuildInputsOnBuild on BuildInputs(build);
@@ -534,7 +597,8 @@ create index IndexBuildsOnJobAndSystem on Builds(project, jobset, job, system);
create index IndexBuildsOnJobset on Builds(project, jobset);
create index IndexBuildsOnProject on Builds(project);
create index IndexBuildsOnTimestamp on Builds(timestamp);
-create index IndexBuildsOnJobsetFinishedTimestamp on Builds(project, jobset, finished, timestamp DESC);
+create index IndexBuildsOnFinishedStopTime on Builds(finished, stoptime DESC);
+create index IndexBuildsOnJobsetFinishedTimestamp on Builds(project, jobset, finished, timestamp DESC); -- obsolete?
create index IndexBuildsOnJobFinishedId on builds(project, jobset, job, system, finished, id DESC);
create index IndexBuildsOnJobSystemCurrent on Builds(project, jobset, job, system, isCurrent);
create index IndexBuildsOnDrvPath on Builds(drvPath);
diff --git a/src/sql/upgrade-17.sql b/src/sql/upgrade-17.sql
new file mode 100644
index 00000000..bf827f75
--- /dev/null
+++ b/src/sql/upgrade-17.sql
@@ -0,0 +1,23 @@
+create table NrBuilds (
+ what text primary key not null,
+ count integer not null
+);
+
+create function modifyNrBuildsFinished() returns trigger as $$
+ begin
+ if ((tg_op = 'INSERT' and new.finished = 1) or
+ (tg_op = 'UPDATE' and old.finished = 0 and new.finished = 1)) then
+ update NrBuilds set count = count + 1 where what = 'finished';
+ elsif ((tg_op = 'DELETE' and old.finished = 1) or
+ (tg_op = 'UPDATE' and old.finished = 1 and new.finished = 0)) then
+ update NrBuilds set count = count - 1 where what = 'finished';
+ end if;
+ return null;
+ end;
+$$ language plpgsql;
+
+create trigger NrBuildsFinished after insert or update or delete on Builds
+ for each row
+ execute procedure modifyNrBuildsFinished();
+
+insert into NrBuilds(what, count) select 'finished', count(*) from Builds where finished = 1;
diff --git a/src/sql/upgrade-18.sql b/src/sql/upgrade-18.sql
new file mode 100644
index 00000000..acd1f397
--- /dev/null
+++ b/src/sql/upgrade-18.sql
@@ -0,0 +1 @@
+create index IndexBuildsOnFinishedStopTime on Builds(finished, stoptime DESC);
diff --git a/src/sql/upgrade-19.sql b/src/sql/upgrade-19.sql
new file mode 100644
index 00000000..72462d62
--- /dev/null
+++ b/src/sql/upgrade-19.sql
@@ -0,0 +1,5 @@
+create table AggregateConstituents (
+ aggregate integer not null references Builds(id) on delete cascade,
+ constituent integer not null references Builds(id) on delete cascade,
+ primary key (aggregate, constituent)
+);
diff --git a/src/sql/upgrade-20.sql b/src/sql/upgrade-20.sql
new file mode 100644
index 00000000..17df74c0
--- /dev/null
+++ b/src/sql/upgrade-20.sql
@@ -0,0 +1,8 @@
+create table CachedDarcsInputs (
+ uri text not null,
+ revision text not null,
+ sha256hash text not null,
+ storePath text not null,
+ revCount integer not null,
+ primary key (uri, revision)
+);
diff --git a/src/sql/upgrade-21.sql b/src/sql/upgrade-21.sql
new file mode 100644
index 00000000..531342c7
--- /dev/null
+++ b/src/sql/upgrade-21.sql
@@ -0,0 +1,2 @@
+alter table Jobsets
+ add column schedulingShares integer not null default 100;
diff --git a/src/sql/upgrade-22.sql b/src/sql/upgrade-22.sql
new file mode 100644
index 00000000..c2a182c4
--- /dev/null
+++ b/src/sql/upgrade-22.sql
@@ -0,0 +1 @@
+alter table Jobsets add column fetchErrorMsg text;
diff --git a/src/sql/upgrade-23.sql b/src/sql/upgrade-23.sql
new file mode 100644
index 00000000..a4875d20
--- /dev/null
+++ b/src/sql/upgrade-23.sql
@@ -0,0 +1,11 @@
+create table StarredJobs (
+ userName text not null,
+ project text not null,
+ jobset text not null,
+ job text not null,
+ primary key (userName, project, jobset, job),
+ foreign key (userName) references Users(userName) on update cascade on delete cascade,
+ foreign key (project) references Projects(name) on update cascade on delete cascade,
+ foreign key (project, jobset) references Jobsets(project, name) on update cascade on delete cascade,
+ foreign key (project, jobset, job) references Jobs(project, jobset, name) on update cascade on delete cascade
+);
diff --git a/src/sql/upgrade-24.sql b/src/sql/upgrade-24.sql
new file mode 100644
index 00000000..585711e6
--- /dev/null
+++ b/src/sql/upgrade-24.sql
@@ -0,0 +1,2 @@
+alter table JobsetInputs add column emailResponsible integer not null default 0;
+alter table BuildInputs add column emailResponsible integer not null default 0;
diff --git a/tests/Makefile.am b/tests/Makefile.am
index b4242741..ab848d85 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -3,6 +3,7 @@ TESTS_ENVIRONMENT = \
HYDRA_DBI="dbi:SQLite:db.sqlite" \
HYDRA_DATA="$(abs_builddir)/data" \
HYDRA_HOME="$(top_srcdir)/src" \
+ HYDRA_CONFIG= \
NIX_REMOTE= \
NIX_CONF_DIR="$(abs_builddir)/nix/etc/nix" \
NIX_STATE_DIR="$(abs_builddir)/nix/var/nix" \
diff --git a/tests/Setup.pm b/tests/Setup.pm
index 1cbbe750..96aecde6 100644
--- a/tests/Setup.pm
+++ b/tests/Setup.pm
@@ -61,7 +61,7 @@ sub createJobsetWithOneInput {
sub evalSucceeds {
my ($jobset) = @_;
- my ($res, $stdout, $stderr) = captureStdoutStderr(60, ("../src/script/hydra-evaluator", $jobset->project->name, $jobset->name));
+ my ($res, $stdout, $stderr) = captureStdoutStderr(60, ("hydra-evaluator", $jobset->project->name, $jobset->name));
chomp $stdout; chomp $stderr;
print STDERR "Evaluation errors for jobset ".$jobset->project->name.":".$jobset->name.": \n".$jobset->errormsg."\n" if $jobset->errormsg;
print STDERR "STDOUT: $stdout\n" if $stdout ne "";
@@ -71,7 +71,7 @@ sub evalSucceeds {
sub runBuild {
my ($build) = @_;
- my ($res, $stdout, $stderr) = captureStdoutStderr(60, ("../src/script/hydra-build", $build->id));
+ my ($res, $stdout, $stderr) = captureStdoutStderr(60, ("hydra-build", $build->id));
print "STDERR: $stderr" if $stderr ne "";
return !$res;
}
diff --git a/tests/api-test.nix b/tests/api-test.nix
index a798294b..2f45a48f 100644
--- a/tests/api-test.nix
+++ b/tests/api-test.nix
@@ -1,6 +1,7 @@
let
+ thisFile = builtins.toFile "default.nix" (builtins.readFile ./default.nix);
builder = builtins.toFile "builder.sh" ''
- echo -n ${builtins.readFile ./default.nix} > $out
+ echo ${thisFile} > $out
'';
in {
job = derivation {
diff --git a/tests/api-test.pl b/tests/api-test.pl
index ad91d1a8..f9068dd4 100644
--- a/tests/api-test.pl
+++ b/tests/api-test.pl
@@ -1,6 +1,6 @@
use LWP::UserAgent;
use JSON;
-use Test::Simple tests => 15;
+use Test::Simple tests => 16;
my $ua = LWP::UserAgent->new;
$ua->cookie_jar({});
@@ -22,11 +22,11 @@ my $result = request_json({ uri => "/login", method => "POST", data => { usernam
my $user = decode_json($result->content());
ok($user->{username} eq "root", "The root user is named root");
-ok($user->{userroles}->[0]->{role} eq "admin", "The root user is an admin");
+ok($user->{userroles}->[0] eq "admin", "The root user is an admin");
$user = decode_json(request_json({ uri => "/current-user" })->content());
ok($user->{username} eq "root", "The current user is named root");
-ok($user->{userroles}->[0]->{role} eq "admin", "The current user is an admin");
+ok($user->{userroles}->[0] eq "admin", "The current user is an admin");
ok(request_json({ uri => '/project/sample' })->code() == 404, "Non-existent projects don't exist");
@@ -37,13 +37,14 @@ my $project = decode_json(request_json({ uri => '/project/sample' })->content())
ok((not @{$project->{jobsets}}), "A new project has no jobsets");
-$result = request_json({ uri => '/jobset/sample/default', method => 'PUT', data => { nixexprpath => "default.nix", nixexprinput => "src", inputs => { src => { type => "path", values => "/run/jobset" } }, enabled => "1", checkinterval => "3600"} });
+$result = request_json({ uri => '/jobset/sample/default', method => 'PUT', data => { nixexprpath => "default.nix", nixexprinput => "my-src", inputs => { "my-src" => { type => "path", values => "/run/jobset" } }, enabled => "1", checkinterval => "3600"} });
ok($result->code() == 201, "PUTting a new jobset creates it");
my $jobset = decode_json(request_json({ uri => '/jobset/sample/default' })->content());
-ok($jobset->{jobsetinputs}->[0]->{name} eq "src", "The new jobset has an 'src' input");
-ok($jobset->{jobsetinputs}->[0]->{jobsetinputalts}->[0]->{value} eq "/run/jobset", "The 'src' input is in /run/jobset");
+ok(exists $jobset->{jobsetinputs}->{"my-src"}, "The new jobset has a 'my-src' input");
+
+ok($jobset->{jobsetinputs}->{"my-src"}->{jobsetinputalts}->[0] eq "/run/jobset", "The 'my-src' input is in /run/jobset");
system("LOGNAME=root NIX_STORE_DIR=/run/nix/store NIX_LOG_DIR=/run/nix/var/log/nix NIX_STATE_DIR=/run/nix/var/nix HYDRA_DATA=/var/lib/hydra HYDRA_DBI='dbi:Pg:dbname=hydra;user=root;' hydra-evaluator sample default");
$result = request_json({ uri => '/jobset/sample/default/evals' });
@@ -56,8 +57,9 @@ ok($eval->{hasnewbuilds} == 1, "The first eval of a jobset has new builds");
sleep 30;
system("echo >> /run/jobset/default.nix; LOGNAME=root NIX_STORE_DIR=/run/nix/store NIX_LOG_DIR=/run/nix/var/log/nix NIX_STATE_DIR=/run/nix/var/nix HYDRA_DATA=/var/lib/hydra HYDRA_DBI='dbi:Pg:dbname=hydra;user=root;' hydra-evaluator sample default");
my $evals = decode_json(request_json({ uri => '/jobset/sample/default/evals' })->content())->{evals};
-ok($evals->[0]->{jobsetevalinputs}->[0]->{revision} != $evals->[1]->{jobsetevalinputs}->[0]->{revision}, "Changing a jobset source changes its revision");
+ok($evals->[0]->{jobsetevalinputs}->{"my-src"}->{revision} != $evals->[1]->{jobsetevalinputs}->{"my-src"}->{revision}, "Changing a jobset source changes its revision");
-my $build = decode_json(request_json({ uri => "/build/" . $evals->[0]->{jobsetevalmembers}->[0]->{build} })->content());
+my $build = decode_json(request_json({ uri => "/build/" . $evals->[0]->{builds}->[0] })->content());
ok($build->{job} eq "job", "The build's job name is job");
ok($build->{finished} == 0, "The build isn't finished yet");
+ok($build->{buildoutputs}->{out}->{path} =~ /^\/run\/nix\/store\/[a-zA-Z0-9]{32}-job$/, "The build's outpath is in the nix store and named 'job'");
diff --git a/tests/evaluation-tests.pl b/tests/evaluation-tests.pl
index 2044c1be..90ae41df 100755
--- a/tests/evaluation-tests.pl
+++ b/tests/evaluation-tests.pl
@@ -7,7 +7,7 @@ use Setup;
my $db = Hydra::Model::DB->new;
-use Test::Simple tests => 68;
+use Test::Simple tests => 72;
hydra_setup($db);
@@ -102,6 +102,13 @@ my @scminputs = (
type => "hg",
uri => "$jobsBaseUri/hg-repo",
update => getcwd . "/jobs/hg-update.sh"
+ },
+ {
+ name => "darcs",
+ nixexpr => "darcs-input.nix",
+ type => "darcs",
+ uri => "$jobsBaseUri/darcs-repo",
+ update => getcwd . "/jobs/darcs-update.sh"
}
);
diff --git a/tests/jobs/darcs-input.nix b/tests/jobs/darcs-input.nix
new file mode 100644
index 00000000..9374b1f6
--- /dev/null
+++ b/tests/jobs/darcs-input.nix
@@ -0,0 +1,10 @@
+with import ./config.nix;
+{ src }:
+{
+ copy =
+ mkDerivation {
+ name = "darcs-input";
+ builder = ./scm-builder.sh;
+ inherit src;
+ };
+}
diff --git a/tests/jobs/darcs-update.sh b/tests/jobs/darcs-update.sh
new file mode 100755
index 00000000..164a9e9d
--- /dev/null
+++ b/tests/jobs/darcs-update.sh
@@ -0,0 +1,24 @@
+#! /bin/sh
+set -e
+
+repo="$1"
+STATE_FILE=$(pwd)/.darcs-state
+if test -e $STATE_FILE; then
+ state=$(cat $STATE_FILE)
+ test $state -gt 1 && state=0
+else
+ state=0;
+fi
+
+case $state in
+ (0) echo "::Create repo. -- continue -- updated::"
+ mkdir darcs-repo
+ darcs init --repodir darcs-repo
+ touch darcs-repo/file
+ darcs add --repodir darcs-repo file
+ darcs record --repodir darcs-repo -a -l -m "add a file" file -A foobar@bar.bar
+ ;;
+ (*) echo "::End. -- stop -- nothing::" ;;
+esac
+
+echo $(($state + 1)) > $STATE_FILE
diff --git a/tests/query-all-tables.pl b/tests/query-all-tables.pl
index 55cc779f..771239a6 100755
--- a/tests/query-all-tables.pl
+++ b/tests/query-all-tables.pl
@@ -7,13 +7,13 @@ my $db = Hydra::Model::DB->new;
my @sources = $db->schema->sources;
my $nrtables = scalar(@sources);
-use Test::Simple tests => 43;
+use Test::Simple tests => 39;
foreach my $source (@sources) {
my $title = "Basic select query for $source";
- if ($source eq "SchemaVersion") {
+ if ($source eq "SchemaVersion" || $source eq "NrBuilds") {
ok(scalar($db->resultset($source)->all) == 1, $title);
- } elsif( $source !~ m/^(LatestSucceeded|JobStatus|ActiveJobs)/) {
+ } elsif( $source !~ m/^LatestSucceeded/) {
ok(scalar($db->resultset($source)->all) == 0, $title);
} else {
ok(scalar($db->resultset($source)->search({},{ bind => ["", "", ""] })) == 0, $title);
diff --git a/tests/s3-backup-test.config b/tests/s3-backup-test.config
new file mode 100644
index 00000000..49068ea9
--- /dev/null
+++ b/tests/s3-backup-test.config
@@ -0,0 +1,4 @@
+<s3backup>
+ jobs = tests:basic:job
+ name = hydra
+</s3backup>
diff --git a/tests/s3-backup-test.pl b/tests/s3-backup-test.pl
new file mode 100755
index 00000000..a81d2d22
--- /dev/null
+++ b/tests/s3-backup-test.pl
@@ -0,0 +1,49 @@
+use strict;
+use File::Basename;
+use Hydra::Model::DB;
+use Hydra::Helper::Nix;
+use Nix::Store;
+use Cwd;
+
+my $db = Hydra::Model::DB->new;
+
+use Test::Simple tests => 6;
+
+$db->resultset('Users')->create({ username => "root", emailaddress => 'root@invalid.org', password => '' });
+
+$db->resultset('Projects')->create({name => "tests", displayname => "", owner => "root"});
+my $project = $db->resultset('Projects')->update_or_create({name => "tests", displayname => "", owner => "root"});
+my $jobset = $project->jobsets->create({name => "basic", nixexprinput => "jobs", nixexprpath => "default.nix", emailoverride => ""});
+
+my $jobsetinput;
+
+$jobsetinput = $jobset->jobsetinputs->create({name => "jobs", type => "path"});
+$jobsetinput->jobsetinputalts->create({altnr => 0, value => getcwd . "/jobs"});
+system("hydra-evaluator " . $jobset->project->name . " " . $jobset->name);
+
+my $successful_hash;
+foreach my $build ($jobset->builds->search({finished => 0})) {
+ system("hydra-build " . $build->id);
+ my @outputs = $build->buildoutputs->all;
+ my $hash = substr basename($outputs[0]->path), 0, 32;
+ if ($build->job->name eq "job") {
+ ok(-e "/tmp/s3/hydra/$hash.nar", "The nar of a successful matched build is uploaded");
+ ok(-e "/tmp/s3/hydra/$hash.narinfo", "The narinfo of a successful matched build is uploaded");
+ $successful_hash = $hash;
+ }
+}
+
+system("hydra-s3-backup-collect-garbage");
+ok(-e "/tmp/s3/hydra/$successful_hash.nar", "The nar of a build that's a root is not removed by gc");
+ok(-e "/tmp/s3/hydra/$successful_hash.narinfo", "The narinfo of a build that's a root is not removed by gc");
+
+my $gcRootsDir = getGCRootsDir;
+opendir DIR, $gcRootsDir or die;
+while(readdir DIR) {
+ next if $_ eq "." or $_ eq "..";
+ unlink "$gcRootsDir/$_";
+}
+closedir DIR;
+system("hydra-s3-backup-collect-garbage");
+ok(! -e "/tmp/s3/hydra/$successful_hash.nar", "The nar of a build that's not a root is removed by gc");
+ok(! -e "/tmp/s3/hydra/$successful_hash.narinfo", "The narinfo of a build that's not a root is removed by gc");