forked from lix-project/hydra
Merge remote-tracking branch 'origin/master' into flake
commit a74dec6fb1
.gitignore (vendored), 1 line changed

@@ -32,3 +32,4 @@ Makefile.in
/inst
hydra-config.h
hydra-config.h.in
result

@@ -205,7 +205,7 @@ in
<xref linkend='ex-hello' /> shows what a
<filename>release.nix</filename> file for <link
xlink:href="http://www.gnu.org/software/hello/">GNU Hello</link>
would you like. GNU Hello is representative of many GNU
would look like. GNU Hello is representative of many GNU
and non-GNU free software projects:

<itemizedlist>

@@ -273,6 +273,7 @@ in
runuser -u ${config.services.postgresql.superUser} -- ${config.services.postgresql.package}/bin/createdb -O hydra hydra
touch ${baseDir}/.db-created
fi
echo "create extension if not exists pg_trgm" | runuser -u ${config.services.postgresql.superUser} -- ${config.services.postgresql.package}/bin/psql hydra
''}

if [ ! -e ${cfg.gcRootsDir} ]; then

@@ -415,6 +416,8 @@ in
hydra-users hydra-queue-runner hydra
hydra-users hydra-www hydra
hydra-users root hydra
# The postgres user is used to create the pg_trgm extension for the hydra database
hydra-users postgres postgres
'';

services.postgresql.authentication = optionalString haveLocalDB

release.nix, 70 lines changed

@@ -28,6 +28,22 @@ let
services.postgresql.package = pkgs.postgresql95;

environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ];

# The following is to work around the following error from hydra-server:
# [error] Caught exception in engine "Cannot determine local time zone"
time.timeZone = "UTC";

nix = {
  # The following is to work around: https://github.com/NixOS/hydra/pull/432
  buildMachines = [
    { hostName = "localhost";
      system = "x86_64-linux";
    }
  ];
  # Without this nix tries to fetch packages from the default
  # cache.nixos.org which is not reachable from this sandboxed NixOS test.
  binaryCaches = [];
};
};

# FIXME: use commit date.

@@ -216,10 +232,62 @@ rec {
      $machine->waitForOpenPort("3000");

      # Run the API tests.
      $machine->mustSucceed("su - hydra -c 'perl ${./tests/api-test.pl}' >&2");
      $machine->mustSucceed("su - hydra -c 'perl -I ${build.${system}.perlDeps}/lib/perl5/site_perl ${./tests/api-test.pl}' >&2");
    '';
  });

  tests.notifications = genAttrs' (system:
    with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; };
    simpleTest {
      machine = { pkgs, ... }: {
        imports = [ (hydraServer build.${system}) ];
        services.hydra-dev.extraConfig = ''
          <influxdb>
            url = http://127.0.0.1:8086
            db = hydra
          </influxdb>
        '';
        services.influxdb.enable = true;
      };
      testScript = ''
        $machine->waitForJob("hydra-init");

        # Create an admin account and some other state.
        $machine->succeed
          ( "su - hydra -c \"hydra-create-user root --email-address 'alice\@example.org' --password foobar --role admin\""
          , "mkdir /run/jobset"
          , "chmod 755 /run/jobset"
          , "cp ${./tests/api-test.nix} /run/jobset/default.nix"
          , "chmod 644 /run/jobset/default.nix"
          , "chown -R hydra /run/jobset"
          );

        # Wait until InfluxDB can receive web requests
        $machine->waitForJob("influxdb");
        $machine->waitForOpenPort("8086");

        # Create an InfluxDB database where hydra will write to
        $machine->succeed(
          "curl -XPOST 'http://127.0.0.1:8086/query' \\
          --data-urlencode 'q=CREATE DATABASE hydra'");

        # Wait until hydra-server can receive HTTP requests
        $machine->waitForJob("hydra-server");
        $machine->waitForOpenPort("3000");

        # Setup the project and jobset
        $machine->mustSucceed(
          "su - hydra -c 'perl -I ${build.${system}.perlDeps}/lib/perl5/site_perl ${./tests/setup-notifications-jobset.pl}' >&2");

        # Wait until hydra has build the job and
        # the InfluxDBNotification plugin uploaded its notification to InfluxDB
        $machine->waitUntilSucceeds(
          "curl -s -H 'Accept: application/csv' \\
          -G 'http://127.0.0.1:8086/query?db=hydra' \\
          --data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success");
      '';
    });

  /*
  tests.s3backup = genAttrs' (system:
    with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; };
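
The <influxdb> section in the extraConfig above is the switch that the new InfluxDBNotification plugin (added further down) checks at notification time. Assuming Hydra's usual parsing of hydra.conf sections into nested hashes, the plugin would effectively see a structure like this sketch (illustrative only, not part of the commit):

# Illustration: the shape buildFinished() expects to find in
# $self->{config}->{influxdb} once the <influxdb> section above is parsed.
my $influxdb = {
    url => "http://127.0.0.1:8086",
    db  => "hydra",
};
# The plugin returns early unless both keys are present:
#   return unless ref $influxdb eq 'HASH'
#       and exists $influxdb->{url} and exists $influxdb->{db};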

@@ -391,7 +391,12 @@ sub search :Local Args(0) {
    error($c, "Invalid character in query.")
        unless $query =~ /^[a-zA-Z0-9_\-\/.]+$/;

    $c->stash->{limit} = 500;
    my $limit = trim $c->request->params->{"limit"};
    if ($limit eq "") {
        $c->stash->{limit} = 500;
    } else {
        $c->stash->{limit} = $limit;
    }

    $c->stash->{projects} = [ $c->model('DB::Projects')->search(
        { -and =>

@@ -422,12 +427,16 @@ sub search :Local Args(0) {

    # Perform build search in separate queries to prevent seq scan on buildoutputs table.
    $c->stash->{builds} = [ $c->model('DB::Builds')->search(
        { "buildoutputs.path" => trim($query) },
        { order_by => ["id desc"], join => ["buildoutputs"] } ) ];
        { "buildoutputs.path" => { ilike => "%$query%" } },
        { order_by => ["id desc"], join => ["buildoutputs"]
        , rows => $c->stash->{limit}
        } ) ];

    $c->stash->{buildsdrv} = [ $c->model('DB::Builds')->search(
        { "drvpath" => trim($query) },
        { order_by => ["id desc"] } ) ];
        { "drvpath" => { ilike => "%$query%" } },
        { order_by => ["id desc"]
        , rows => $c->stash->{limit}
        } ) ];

    $c->stash->{resource} = { projects => $c->stash->{projects},
        jobsets => $c->stash->{jobsets},
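
Both searches above move from an exact match on the trimmed query to a case-insensitive substring match capped by the new limit. Under the usual DBIx::Class/SQL::Abstract mapping, ilike becomes ILIKE and rows becomes LIMIT, and the pg_trgm GIN index added in the SQL changes below is what lets the leading-wildcard pattern use an index rather than a sequential scan. A rough equivalent of the drvpath branch written against a plain DBI handle ($dbh here is an assumption for illustration, not taken from the commit):

# Illustration: approximately the query the drvpath search above generates.
my $sth = $dbh->prepare(q{
    SELECT me.* FROM Builds me
    WHERE drvpath ILIKE ?
    ORDER BY id DESC
    LIMIT ?
});
$sth->execute("%$query%", $c->stash->{limit});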

src/lib/Hydra/Plugin/InfluxDBNotification.pm (new file), 137 lines

@@ -0,0 +1,137 @@
package Hydra::Plugin::InfluxDBNotification;

use strict;
use parent 'Hydra::Plugin';
use HTTP::Request;
# use JSON;
use LWP::UserAgent;
# use Hydra::Helper::CatalystUtils;

sub toBuildStatusDetailed {
    my ($buildStatus) = @_;
    if ($buildStatus == 0) {
        return "success";
    }
    elsif ($buildStatus == 1) {
        return "failure";
    }
    elsif ($buildStatus == 2) {
        return "dependency-failed";
    }
    elsif ($buildStatus == 4) {
        return "cancelled";
    }
    elsif ($buildStatus == 6) {
        return "failed-with-output";
    }
    elsif ($buildStatus == 7) {
        return "timed-out";
    }
    elsif ($buildStatus == 9) {
        return "unsupported-system";
    }
    elsif ($buildStatus == 10) {
        return "log-limit-exceeded";
    }
    elsif ($buildStatus == 11) {
        return "output-limit-exceeded";
    }
    elsif ($buildStatus == 12) {
        return "non-deterministic-build";
    }
    else {
        return "aborted";
    }
}

sub toBuildStatusClass {
    my ($buildStatus) = @_;
    if ($buildStatus == 0) {
        return "success";
    }
    elsif ($buildStatus == 3
        || $buildStatus == 4
        || $buildStatus == 8
        || $buildStatus == 10
        || $buildStatus == 11)
    {
        return "canceled";
    }
    else {
        return "failed";
    }
}

# Syntax
# build_status,job=my-job status=failed,result=dependency-failed duration=123i
# |            --------------------  --------------              |
# |                     |                   |                    |
# |                     |                   |                    |
# +-----------+--------+-+---------+-+---------+
# |measurement|,tag_set| |field_set| |timestamp|
# +-----------+--------+-+---------+-+---------+
sub createLine {
    my ($measurement, $tagSet, $fieldSet, $timestamp) = @_;
    my @tags = ();
    foreach my $tag (sort keys %$tagSet) {
        push @tags, "$tag=$tagSet->{$tag}";
    }
    my @fields = ();
    foreach my $field (sort keys %$fieldSet) {
        push @fields, "$field=$fieldSet->{$field}";
    }
    my $tags = join(",", @tags);
    my $fields = join(",", @fields);
    return "$measurement,$tags $fields $timestamp";
}

sub buildFinished {
    my ($self, $build, $dependents) = @_;
    my $influxdb = $self->{config}->{influxdb};

    # skip if we didn't configure
    return unless defined $influxdb;
    # skip if we didn't set the URL and the DB
    return unless ref $influxdb eq 'HASH' and exists $influxdb->{url} and exists $influxdb->{db};

    my @lines = ();
    foreach my $b ($build, @{$dependents}) {
        my $tagSet = {
            status => toBuildStatusClass($b->buildstatus),
            result => toBuildStatusDetailed($b->buildstatus),
            project => $b->project->name,
            jobset => $b->jobset->name,
            repo => ($b->jobset->name =~ /^(.*)\.pr-/) ? $1 : $b->jobset->name,
            job => $b->job->name,
            system => $b->system,
            cached => $b->iscachedbuild ? "true" : "false",
        };
        my $fieldSet = {
            # this line is needed to be able to query the statuses
            build_status => $b->buildstatus . "i",
            build_id => '"' . $b->id . '"',
            main_build_id => '"' . $build->id . '"',
            duration => ($b->stoptime - $b->starttime) . "i",
            queued => ($b->starttime - $b->timestamp > 0 ? $b->starttime - $b->timestamp : 0) . "i",
            closure_size => ($b->closuresize // 0) . "i",
            size => ($b->size // 0) . "i",
        };
        my $line =
          createLine("hydra_build_status", $tagSet, $fieldSet, $b->stoptime);
        push @lines, $line;
    }

    my $payload = join("\n", @lines);
    print STDERR "sending InfluxDB measurements to server $influxdb->{url}:\n$payload\n";

    my $ua = LWP::UserAgent->new();
    my $req = HTTP::Request->new('POST',
        "$influxdb->{url}/write?db=$influxdb->{db}&precision=s");
    $req->header('Content-Type' => 'application/x-www-form-urlencoded');
    $req->content($payload);
    my $res = $ua->request($req);
    print STDERR $res->status_line, ": ", $res->decoded_content, "\n"
      unless $res->is_success;
}

1;
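
To make the line-protocol comment above concrete, here is an illustrative call to the createLine helper (not part of the commit). Both hashes are emitted in sorted key order, so the resulting line is deterministic:

# Illustration: a hypothetical tag set and field set passed to createLine.
my $line = createLine(
    "hydra_build_status",
    { job => "my-job", status => "failed" },       # tag set
    { build_status => "1i", duration => "123i" },  # field set
    1590000000,                                    # timestamp in seconds (precision=s)
);
# $line eq "hydra_build_status,job=my-job,status=failed build_status=1i,duration=123i 1590000000"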

@@ -695,3 +695,22 @@ create index IndexBuildsOnKeep on Builds(keep) where keep = 1;
create index IndexJobsetEvalsOnJobsetId on JobsetEvals(project, jobset, id desc) where hasNewBuilds = 1;

create index IndexBuildsOnNotificationPendingSince on Builds(notificationPendingSince) where notificationPendingSince is not null;

#ifdef POSTGRESQL
-- The pg_trgm extension has to be created by a superuser. The NixOS
-- module creates this extension in the systemd prestart script. We
-- then ensure the extension has been created before creating the
-- index. If it is not possible to create the extension, a warning
-- message is emitted to inform the user the index creation is skipped
-- (slower complex queries on builds.drvpath).
do $$
begin
  create extension if not exists pg_trgm;
  -- Provide an index used by LIKE operator on builds.drvpath (search query)
  create index IndexTrgmBuildsOnDrvpath on builds using gin (drvpath gin_trgm_ops);
exception when others then
  raise warning 'Can not create extension pg_trgm: %', SQLERRM;
  raise warning 'HINT: Temporary provide superuser role to your Hydra Postgresql user and run the script src/sql/upgrade-57.sql';
  raise warning 'The pg_trgm index on builds.drvpath has been skipped (slower complex queries on builds.drvpath)';
end$$;
#endif

@@ -1,7 +1,16 @@
alter table Jobsets alter column nixExprInput drop not null;
alter table Jobsets alter column nixExprPath drop not null;
alter table Jobsets add column type integer default 0;
alter table Jobsets add column flake text;
alter table Jobsets add check ((type = 0) = (nixExprInput is not null and nixExprPath is not null));
alter table Jobsets add check ((type = 1) = (flake is not null));
alter table JobsetEvals add column flake text;
-- The pg_trgm extension has to be created by a superuser. The NixOS
-- module creates this extension in the systemd prestart script. We
-- then ensure the extension has been created before creating the
-- index. If it is not possible to create the extension, a warning
-- message is emitted to inform the user the index creation is skipped
-- (slower complex queries on builds.drvpath).
do $$
begin
  create extension if not exists pg_trgm;
  -- Provide an index used by LIKE operator on builds.drvpath (search query)
  create index IndexTrgmBuildsOnDrvpath on builds using gin (drvpath gin_trgm_ops);
exception when others then
  raise warning 'Can not create extension pg_trgm: %', SQLERRM;
  raise warning 'HINT: Temporary provide superuser role to your Hydra Postgresql user and run the script src/sql/upgrade-57.sql';
  raise warning 'The pg_trgm index on builds.drvpath has been skipped (slower complex queries on builds.drvpath)';
end$$;

src/sql/upgrade-58.sql (new file), 7 lines

@@ -0,0 +1,7 @@
alter table Jobsets alter column nixExprInput drop not null;
alter table Jobsets alter column nixExprPath drop not null;
alter table Jobsets add column type integer default 0;
alter table Jobsets add column flake text;
alter table Jobsets add check ((type = 0) = (nixExprInput is not null and nixExprPath is not null));
alter table Jobsets add check ((type = 1) = (flake is not null));
alter table JobsetEvals add column flake text;

@@ -12,6 +12,7 @@ TESTS_ENVIRONMENT = \
  NIX_STORE_DIR="$(abs_builddir)/nix/store" \
  NIX_LOG_DIR="$(abs_builddir)/nix/var/log/nix" \
  NIX_BUILD_HOOK= \
  PGHOST=/tmp \
  PERL5LIB="$(srcdir):$(abs_top_srcdir)/src/lib:$$PERL5LIB" \
  PATH=$(abs_top_srcdir)/src/hydra-evaluator:$(abs_top_srcdir)/src/script:$(abs_top_srcdir)/src/hydra-eval-jobs:$(abs_top_srcdir)/src/hydra-queue-runner:$$PATH \
  perl -w

@@ -1,5 +1,5 @@
use strict;
system("initdb -D postgres") == 0 or die;
system("pg_ctl -D postgres -o \"-F -p 6433 -h ''\" -w start") == 0 or die;
system("pg_ctl -D postgres -o \"-F -p 6433 -h '' -k /tmp \" -w start") == 0 or die;
system("createdb -p 6433 hydra-test-suite") == 0 or die;
system("hydra-init") == 0 or die;
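
The PGHOST=/tmp added to TESTS_ENVIRONMENT and the -k /tmp passed to pg_ctl belong together: the test Postgres now puts its Unix socket in /tmp, and client processes look for it there instead of in the default socket directory. A minimal sketch of a client connection over that socket, assuming DBD::Pg is available (illustrative only, not part of the commit):

# Illustration: connecting to the test database through the /tmp Unix socket.
use DBI;
my $dbh = DBI->connect(
    "dbi:Pg:dbname=hydra-test-suite;host=/tmp;port=6433",
    "", "",
    { RaiseError => 1 },
);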

tests/setup-notifications-jobset.pl (new file), 56 lines

@@ -0,0 +1,56 @@
use LWP::UserAgent;
use JSON;

my $ua = LWP::UserAgent->new;
$ua->cookie_jar({});

sub request_json {
    my ($opts) = @_;
    my $req = HTTP::Request->new;
    $req->method($opts->{method} or "GET");
    $req->uri("http://localhost:3000$opts->{uri}");
    $req->header(Accept => "application/json");
    $req->header(Referer => "http://localhost:3000/") if $opts->{method} eq "POST";
    $req->content(encode_json($opts->{data})) if defined $opts->{data};
    my $res = $ua->request($req);
    print $res->as_string();
    return $res;
}

my $result = request_json({
    uri => "/login",
    method => "POST",
    data => {
        username => "root",
        password => "foobar"
    }
});

$result = request_json({
    uri => '/project/sample',
    method => 'PUT',
    data => {
        displayname => "Sample",
        enabled => "1",
        visible => "1",
    }
});

$result = request_json({
    uri => '/jobset/sample/default',
    method => 'PUT',
    data => {
        nixexprpath => "default.nix",
        nixexprinput => "my-src",
        inputs => {
            "my-src" => {
                type => "path",
                value => "/run/jobset"
            }
        },
        enabled => "1",
        visible => "1",
        checkinterval => "5",
        keepnr => 1
    }
});
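
The request_json helper above drives Hydra's JSON API through a cookie-authenticated LWP::UserAgent: log in as root, then PUT a project and a jobset that points at /run/jobset. As a hedged follow-up sketch (not part of the commit, and assuming the matching GET endpoint answers with JSON), the same helper could read the jobset back to confirm the PUT took effect:

# Illustration: fetch the jobset just created and fail loudly if it is missing.
my $check = request_json({ uri => '/jobset/sample/default', method => 'GET' });
die "jobset 'sample:default' was not created\n" unless $check->is_success;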