Merge pull request #737 from Ma27/rm-sqlite
Get rid of the dependency on SQLite
commit 87837f1d82
.gitignore (vendored)
@@ -32,3 +32,4 @@ Makefile.in
 hydra-config.h
 hydra-config.h.in
 result
+tests/jobs/config.nix
@@ -8,22 +8,23 @@
 * Setting the maximum number of concurrent builds per system type:
 
-  $ sqlite3 hydra.sqlite "insert into SystemTypes(system, maxConcurrent) values('i686-linux', 3);"
+  $ psql -d hydra <<< "insert into SystemTypes(system, maxConcurrent) values('i686-linux', 3);"
 
 * Creating a user:
 
-  $ sqlite3 hydra.sqlite "insert into Users(userName, emailAddress, password) values('root', 'e.dolstra@tudelft.nl', '$(echo -n foobar | sha1sum | cut -c1-40)');"
+  $ hydra-create-user root --email-address 'e.dolstra@tudelft.nl' \
+      --password-hash "$(echo -n foobar | sha1sum | cut -c1-40)"
 
   (Replace "foobar" with the desired password.)
 
   To make the user an admin:
 
-  $ sqlite3 hydra.sqlite "insert into UserRoles(userName, role) values('root', 'admin');"
+  $ hydra-create-user root --role admin
 
   To enable a non-admin user to create projects:
 
-  $ sqlite3 hydra.sqlite "insert into UserRoles(userName, role) values('alice', 'create-projects');"
+  $ hydra-create-user alice --role create-projects
 
 * Creating a release set:
 
   insert into ReleaseSets(project, name) values('patchelf', 'unstable');
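Note on the new commands above: each hydra-create-user invocation in the manual sets a single attribute, but the flags appear to compose, so an admin account could plausibly be created in one call. A sketch, with user name, address, and password as placeholder values:

    $ hydra-create-user alice --email-address 'alice@example.org' \
        --password-hash "$(echo -n secret | sha1sum | cut -c1-40)" \
        --role admin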
@@ -43,16 +43,11 @@ Identifier: patchelf
 The identifier should be a unique name (it is the primary
 database key for the project table in the database). If you try
 to create a project with an already existing identifier you'd
-get an error message such as:
-
-<screen>
-I'm very sorry, but an error occurred:
-DBIx::Class::ResultSet::create(): DBI Exception: DBD::SQLite::st execute failed: column name is not unique(19) at dbdimp.c line 402
-</screen>
+get an error message from the database.
 
 So try to create the project after entering just the general
 information to figure out if you have chosen a unique name.
 Job sets can be added once the project has been created.
 
 <screen>
 Display name: Patchelf
@@ -103,7 +103,7 @@
       src = self;
 
       buildInputs =
-        [ makeWrapper autoconf automake libtool unzip nukeReferences pkgconfig sqlite libpqxx
+        [ makeWrapper autoconf automake libtool unzip nukeReferences pkgconfig libpqxx
          gitAndTools.topGit mercurial darcs subversion bazaar openssl bzip2 libxslt
          perlDeps perl final.nix
          postgresql95 # for running the tests
@@ -114,7 +114,7 @@
        ];
 
       hydraPath = lib.makeBinPath (
-        [ sqlite subversion openssh final.nix coreutils findutils pixz
+        [ subversion openssh final.nix coreutils findutils pixz
          gzip bzip2 lzma gnutar unzip git gitAndTools.topGit mercurial darcs gnused bazaar
        ] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ] );
 
@@ -214,7 +214,7 @@ sub scmdiff : Path('/api/scmdiff') Args(0) {
 sub triggerJobset {
     my ($self, $c, $jobset, $force) = @_;
     print STDERR "triggering jobset ", $jobset->get_column('project') . ":" . $jobset->name, "\n";
-    txn_do($c->model('DB')->schema, sub {
+    $c->model('DB')->schema->txn_do(sub {
         $jobset->update({ triggertime => time });
         $jobset->update({ forceeval => 1 }) if $force;
     });
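This hunk sets the pattern for most of the diff that follows: every call site of the exported txn_do helper (removed from the helper module further down) is rewritten to call DBIx::Class::Schema's own txn_do method directly, since the retry-on-"database is locked" loop only made sense for SQLite. A minimal before/after sketch of the calling convention:

    # before: free function exported by the helper module,
    # schema passed as the first argument
    txn_do($schema, sub { ... });

    # after: DBIx::Class::Schema's native transaction wrapper
    $schema->txn_do(sub { ... });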
@@ -90,7 +90,7 @@ sub news_submit : Chained('admin') PathPart('news/submit') Args(0) {
 sub news_delete : Chained('admin') PathPart('news/delete') Args(1) {
     my ($self, $c, $id) = @_;
 
-    txn_do($c->model('DB')->schema, sub {
+    $c->model('DB')->schema->txn_do(sub {
         my $newsItem = $c->model('DB::NewsItems')->find($id)
           or notFound($c, "Newsitem with id $id doesn't exist.");
         $newsItem->delete;
@@ -526,7 +526,7 @@ sub keep : Chained('buildChain') PathPart Args(1) {
         registerRoot $_->path foreach $build->buildoutputs;
     }
 
-    txn_do($c->model('DB')->schema, sub {
+    $c->model('DB')->schema->txn_do(sub {
         $build->update({keep => $keep});
     });
 
@@ -62,7 +62,7 @@ sub jobset_PUT {
     }
 
     if (defined $c->stash->{jobset}) {
-        txn_do($c->model('DB')->schema, sub {
+        $c->model('DB')->schema->txn_do(sub {
             updateJobset($c, $c->stash->{jobset});
         });
 
@@ -74,7 +74,7 @@ sub jobset_PUT {
 
     else {
         my $jobset;
-        txn_do($c->model('DB')->schema, sub {
+        $c->model('DB')->schema->txn_do(sub {
             # Note: $jobsetName is validated in updateProject, which will
             # abort the transaction if the name isn't valid.
             $jobset = $c->stash->{project}->jobsets->create(
@@ -100,7 +100,7 @@ sub jobset_DELETE {
         error($c, "can't modify jobset of declarative project", 403);
     }
 
-    txn_do($c->model('DB')->schema, sub {
+    $c->model('DB')->schema->txn_do(sub {
         $c->stash->{jobset}->jobsetevals->delete;
         $c->stash->{jobset}->builds->delete;
         $c->stash->{jobset}->delete;
@@ -146,7 +146,7 @@ sub release : Chained('evalChain') PathPart('release') Args(0) {
 
     my $release;
 
-    txn_do($c->model('DB')->schema, sub {
+    $c->model('DB')->schema->txn_do(sub {
 
         $release = $c->stash->{project}->releases->create(
             { name => $releaseName
@@ -41,7 +41,7 @@ sub project_PUT {
     if (defined $c->stash->{project}) {
         requireProjectOwner($c, $c->stash->{project});
 
-        txn_do($c->model('DB')->schema, sub {
+        $c->model('DB')->schema->txn_do(sub {
             updateProject($c, $c->stash->{project});
         });
 
@@ -55,7 +55,7 @@ sub project_PUT {
         requireMayCreateProjects($c);
 
         my $project;
-        txn_do($c->model('DB')->schema, sub {
+        $c->model('DB')->schema->txn_do(sub {
             # Note: $projectName is validated in updateProject,
             # which will abort the transaction if the name isn't
             # valid. Idem for the owner.
@@ -77,7 +77,7 @@ sub project_DELETE {
 
     requireProjectOwner($c, $c->stash->{project});
 
-    txn_do($c->model('DB')->schema, sub {
+    $c->model('DB')->schema->txn_do(sub {
         $c->stash->{project}->jobsetevals->delete;
         $c->stash->{project}->builds->delete;
         $c->stash->{project}->delete;
@@ -198,7 +198,7 @@ sub create_release_submit : Chained('projectChain') PathPart('create-release/sub
     my $releaseName = $c->request->params->{name};
 
     my $release;
-    txn_do($c->model('DB')->schema, sub {
+    $c->model('DB')->schema->txn_do(sub {
         # Note: $releaseName is validated in updateRelease, which will
         # abort the transaction if the name isn't valid.
         $release = $c->stash->{project}->releases->create(
@@ -63,13 +63,13 @@ sub submit : Chained('release') PathPart('submit') Args(0) {
     requireProjectOwner($c, $c->stash->{project});
 
     if (($c->request->params->{action} || "") eq "delete") {
-        txn_do($c->model('DB')->schema, sub {
+        $c->model('DB')->schema->txn_do(sub {
             $c->stash->{release}->delete;
         });
         $c->res->redirect($c->uri_for($c->controller('Project')->action_for('project'),
             [$c->stash->{project}->name]));
     } else {
-        txn_do($c->model('DB')->schema, sub {
+        $c->model('DB')->schema->txn_do(sub {
             updateRelease($c, $c->stash->{release});
         });
         $c->res->redirect($c->uri_for($self->action_for("view"),
@@ -163,7 +163,7 @@ sub register :Local Args(0) {
     error($c, "Your user name is already taken.")
         if $c->find_user({ username => $userName });
 
-    txn_do($c->model('DB')->schema, sub {
+    $c->model('DB')->schema->txn_do(sub {
         my $user = $c->model('DB::Users')->create(
             { username => $userName
             , password => "!"
@@ -261,7 +261,7 @@ sub edit_PUT {
         return;
     }
 
-    txn_do($c->model('DB')->schema, sub {
+    $c->model('DB')->schema->txn_do(sub {
         updatePreferences($c, $user);
     });
 
@@ -46,7 +46,7 @@ sub updateDeclarativeJobset {
         $update{$key} = $declSpec->{$key};
         delete $declSpec->{$key};
     }
-    txn_do($db, sub {
+    $db->txn_do(sub {
         my $jobset = $project->jobsets->update_or_create(\%update);
         $jobset->jobsetinputs->delete;
         while ((my $name, my $data) = each %{$declSpec->{"inputs"}}) {
@@ -79,7 +79,7 @@ sub handleDeclarativeJobsetBuild {
     }
 
     my $declSpec = decode_json($declText);
-    txn_do($db, sub {
+    $db->txn_do(sub {
         my @kept = keys %$declSpec;
         push @kept, ".jobsets";
         $project->jobsets->search({ name => { "not in" => \@kept } })->update({ enabled => 0, hidden => 1 });
@@ -14,7 +14,7 @@ use IPC::Run;
 
 our @ISA = qw(Exporter);
 our @EXPORT = qw(
-    getHydraHome getHydraConfig getBaseUrl txn_do
+    getHydraHome getHydraConfig getBaseUrl
     getSCMCacheDir
     registerRoot getGCRootsDir gcRootFor
     jobsetOverview jobsetOverview_
@@ -61,22 +61,6 @@ sub getBaseUrl {
 }
 
 
-# Awful hack to handle timeouts in SQLite: just retry the transaction.
-# DBD::SQLite *has* a 30 second retry window, but apparently it
-# doesn't work.
-sub txn_do {
-    my ($db, $coderef) = @_;
-    my $res;
-    while (1) {
-        eval {
-            $res = $db->txn_do($coderef);
-        };
-        return $res if !$@;
-        die $@ unless $@ =~ "database is locked";
-    }
-}
-
 sub getSCMCacheDir {
     return Hydra::Model::DB::getHydraPath . "/scm" ;
 }
@@ -446,7 +430,7 @@ sub getTotalShares {
 
 sub cancelBuilds($$) {
     my ($db, $builds) = @_;
-    return txn_do($db, sub {
+    return $db->txn_do(sub {
         $builds = $builds->search({ finished => 0 });
         my $n = $builds->count;
         my $time = time();
@@ -473,7 +457,7 @@ sub restartBuilds($$) {
 
     my $nrRestarted = 0;
 
-    txn_do($db, sub {
+    $db->txn_do(sub {
         # Reset the stats for the evals to which the builds belongs.
         # !!! Should do this in a trigger.
         $db->resultset('JobsetEvals')->search(
@@ -10,11 +10,7 @@ sub getHydraPath {
 }
 
 sub getHydraDBPath {
-    my $db = $ENV{"HYDRA_DBI"};
-    return $db if defined $db;
-    my $path = getHydraPath . '/hydra.sqlite';
-    #warn "The Hydra database ($path) does not exist!\n" unless -f $path;
-    return "dbi:SQLite:$path";
+    return $ENV{"HYDRA_DBI"} || "dbi:Pg:dbname=hydra;";
 }
 
 __PACKAGE__->config(
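With the SQLite fallback gone, the PostgreSQL DSN dbi:Pg:dbname=hydra; becomes the hard default, and HYDRA_DBI remains the override. A sketch of pointing Hydra at a non-default database (host and user values are illustrative; the key/value pairs follow standard DBD::Pg DSN syntax):

    $ export HYDRA_DBI='dbi:Pg:dbname=hydra;host=localhost;user=hydra;'
    $ hydra-init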
@@ -58,7 +58,7 @@ sub fetchInput {
     # FIXME: time window between nix-prefetch-bzr and addTempRoot.
     addTempRoot($storePath);
 
-    txn_do($self->{db}, sub {
+    $self->{db}->txn_do(sub {
         $self->{db}->resultset('CachedBazaarInputs')->create(
             { uri => $uri
             , revision => $revision
@@ -77,7 +77,7 @@ sub fetchInput {
     $sha256 = queryPathHash($storePath);
     $sha256 =~ s/sha256://;
 
-    txn_do($self->{db}, sub {
+    $self->{db}->txn_do(sub {
         $self->{db}->resultset('CachedDarcsInputs')->update_or_create(
             { uri => $uri
             , revision => $revision
@@ -218,7 +218,7 @@ sub fetchInput {
     # FIXME: time window between nix-prefetch-git and addTempRoot.
     addTempRoot($storePath);
 
-    txn_do($self->{db}, sub {
+    $self->{db}->txn_do(sub {
         $self->{db}->resultset('CachedGitInputs')->update_or_create(
             { uri => $uri
             , branch => $branch
@@ -85,7 +85,7 @@ sub fetchInput {
     # FIXME: time window between nix-prefetch-hg and addTempRoot.
     addTempRoot($storePath);
 
-    txn_do($self->{db}, sub {
+    $self->{db}->txn_do(sub {
         $self->{db}->resultset('CachedHgInputs')->update_or_create(
             { uri => $uri
             , branch => $branch
@@ -54,7 +54,7 @@ sub fetchInput {
     # changes, we get a new "revision", but if it doesn't change
     # (or changes back), we don't get a new "revision".
     if (!defined $cachedInput) {
-        txn_do($self->{db}, sub {
+        $self->{db}->txn_do(sub {
             $self->{db}->resultset('CachedPathInputs')->update_or_create(
                 { srcpath => $uri
                 , timestamp => $timestamp
@@ -65,7 +65,7 @@ sub fetchInput {
             });
     } else {
         $timestamp = $cachedInput->timestamp;
-        txn_do($self->{db}, sub {
+        $self->{db}->txn_do(sub {
             $cachedInput->update({lastseen => time});
         });
     }
@@ -71,7 +71,7 @@ sub fetchInput {
 
     $sha256 = queryPathHash($storePath); $sha256 =~ s/sha256://;
 
-    txn_do($self->{db}, sub {
+    $self->{db}->txn_do(sub {
         $self->{db}->resultset('CachedSubversionInputs')->update_or_create(
             { uri => $uri
             , revision => $revision
@@ -54,7 +54,7 @@ die "$0: type must be `hydra' or `google'\n"
 
 my $db = Hydra::Model::DB->new();
 
-txn_do($db, sub {
+$db->txn_do(sub {
     my $user = $db->resultset('Users')->find({ username => $renameFrom // $userName });
     if ($renameFrom) {
         die "$0: user `$renameFrom' does not exist\n" unless $user;
@@ -399,7 +399,7 @@ sub checkBuild {
 
     my $build;
 
-    txn_do($db, sub {
+    $db->txn_do(sub {
         my $job = $jobset->jobs->update_or_create({
             name => $jobName,
             jobset_id => $jobset->id,
@@ -501,7 +501,7 @@ sub setJobsetError {
     my $prevError = $jobset->errormsg;
 
     eval {
-        txn_do($db, sub {
+        $db->txn_do(sub {
             $jobset->update({ errormsg => $errorMsg, errortime => time, fetcherrormsg => undef });
         });
     };
@@ -603,7 +603,7 @@ sub checkJobsetWrapped {
     if ($fetchError) {
         Net::Statsd::increment("hydra.evaluator.failed_checkouts");
         print STDERR $fetchError;
-        txn_do($db, sub {
+        $db->txn_do(sub {
             $jobset->update({ lastcheckedtime => time, fetcherrormsg => $fetchError }) if !$dryRun;
             $db->storage->dbh->do("notify eval_failed, ?", undef, join('\t', $tmpId));
         });
@@ -619,7 +619,7 @@ sub checkJobsetWrapped {
     if (defined $prevEval && $prevEval->hash eq $argsHash && !$dryRun && !$jobset->forceeval && $prevEval->flake eq $flakeRef) {
         print STDERR "  jobset is unchanged, skipping\n";
         Net::Statsd::increment("hydra.evaluator.unchanged_checkouts");
-        txn_do($db, sub {
+        $db->txn_do(sub {
             $jobset->update({ lastcheckedtime => time, fetcherrormsg => undef });
             $db->storage->dbh->do("notify eval_cached, ?", undef, join('\t', $tmpId));
         });
@@ -660,7 +660,7 @@ sub checkJobsetWrapped {
     my $dbStart = clock_gettime(CLOCK_MONOTONIC);
 
     my %buildMap;
-    txn_do($db, sub {
+    $db->txn_do(sub {
 
         my $prevEval = getPrevJobsetEval($db, $jobset, 1);
 
@@ -806,7 +806,7 @@ sub checkJobset {
     my $failed = 0;
     if ($checkError) {
         print STDERR $checkError;
-        txn_do($db, sub {
+        $db->txn_do(sub {
             $jobset->update({lastcheckedtime => time});
             setJobsetError($jobset, $checkError);
             $db->storage->dbh->do("notify eval_failed, ?", undef, join('\t', $tmpId));
@@ -25,9 +25,8 @@ my @tables = $dbh->tables;
 if (! grep { /SchemaVersion/i } @tables) {
     print STDERR "initialising the Hydra database schema...\n";
     my $schema = read_file(
-        $dbh->{Driver}->{Name} eq 'SQLite' ? "$home/sql/hydra-sqlite.sql" :
         $dbh->{Driver}->{Name} eq 'Pg' ? "$home/sql/hydra-postgresql.sql" :
-        die "unsupported database type\n");
+        die "unsupported database type $dbh->{Driver}->{Name}\n");
     my @statements = $sql_splitter->split($schema);
     eval {
         $dbh->begin_work;
@@ -79,14 +79,8 @@ create table Jobsets (
     primary key (project, name),
     foreign key (project) references Projects(name) on delete cascade on update cascade,
     constraint Jobsets_id_unique UNIQUE(id)
-#ifdef SQLITE
-    ,
-    foreign key (project, name, nixExprInput) references JobsetInputs(project, jobset, name)
-#endif
 );
 
-#ifdef POSTGRESQL
-
 create function notifyJobsetSharesChanged() returns trigger as 'begin notify jobset_shares_changed; return null; end;' language plpgsql;
 create trigger JobsetSharesChanged after update on Jobsets for each row
     when (old.schedulingShares != new.schedulingShares) execute procedure notifyJobsetSharesChanged();
@@ -104,9 +98,6 @@ create trigger JobsetSchedulingChanged after update on Jobsets for each row
         or (old.enabled != new.enabled))
     execute procedure notifyJobsetSchedulingChanged();
 
-#endif
-
 
 create table JobsetRenames (
     project text not null,
     from_ text not null,
@@ -157,11 +148,7 @@ create table Jobs (
 
 
 create table Builds (
-#ifdef POSTGRESQL
     id serial primary key not null,
-#else
-    id integer primary key autoincrement not null,
-#endif
 
     finished integer not null, -- 0 = scheduled, 1 = finished
 
@@ -244,8 +231,6 @@ create table Builds (
 );
 
 
-#ifdef POSTGRESQL
-
 create function notifyBuildsDeleted() returns trigger as 'begin notify builds_deleted; return null; end;' language plpgsql;
 create trigger BuildsDeleted after delete on Builds execute procedure notifyBuildsDeleted();
 
@@ -261,8 +246,6 @@ create function notifyBuildBumped() returns trigger as 'begin notify builds_bump
 create trigger BuildBumped after update on Builds for each row
     when (old.globalPriority != new.globalPriority) execute procedure notifyBuildBumped();
 
-#endif
-
 
 create table BuildOutputs (
     build integer not null,
@@ -332,11 +315,7 @@ create table BuildStepOutputs (
 
 -- Inputs of builds.
 create table BuildInputs (
-#ifdef POSTGRESQL
     id serial primary key not null,
-#else
-    id integer primary key autoincrement not null,
-#endif
 
     -- Which build this input belongs to.
     build integer,
@@ -502,11 +481,7 @@ create table ReleaseMembers (
 
 
 create table JobsetEvals (
-#ifdef POSTGRESQL
     id serial primary key not null,
-#else
-    id integer primary key autoincrement not null,
-#endif
 
     project text not null,
     jobset text not null,
@@ -577,11 +552,7 @@ create table UriRevMapper (
 
 
 create table NewsItems (
-#ifdef POSTGRESQL
     id serial primary key not null,
-#else
-    id integer primary key autoincrement not null,
-#endif
     contents text not null,
     createTime integer not null,
     author text not null,
@@ -614,7 +585,6 @@ create table FailedPaths (
     path text primary key not null
 );
 
-#ifdef POSTGRESQL
 
 -- Needed because Postgres doesn't have "ignore duplicate" or upsert
 -- yet.
@@ -622,7 +592,6 @@ create rule IdempotentInsert as on insert to FailedPaths
     where exists (select 1 from FailedPaths where path = new.path)
     do instead nothing;
 
-#endif
 
 
 create table SystemStatus (
@@ -639,7 +608,6 @@ create table NrBuilds (
 
 insert into NrBuilds(what, count) values('finished', 0);
 
-#ifdef POSTGRESQL
 
 create function modifyNrBuildsFinished() returns trigger as $$
 begin
@@ -658,8 +626,6 @@ create trigger NrBuildsFinished after insert or update or delete on Builds
     for each row
     execute procedure modifyNrBuildsFinished();
 
-#endif
-
 
 -- Some indices.
 
@@ -704,7 +670,6 @@ create index IndexJobsetEvalsOnJobsetId on JobsetEvals(project, jobset, id desc)
 
 create index IndexBuildsOnNotificationPendingSince on Builds(notificationPendingSince) where notificationPendingSince is not null;
 
-#ifdef POSTGRESQL
 -- The pg_trgm extension has to be created by a superuser. The NixOS
 -- module creates this extension in the systemd prestart script. We
 -- then ensure the extension has been created before creating the
@@ -721,4 +686,3 @@ exception when others then
     raise warning 'HINT: Temporary provide superuser role to your Hydra Postgresql user and run the script src/sql/upgrade-57.sql';
     raise warning 'The pg_trgm index on builds.drvpath has been skipped (slower complex queries on builds.drvpath)';
 end$$;
-#endif
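With the #ifdef POSTGRESQL guards stripped, the notify-based triggers above exist unconditionally, so they can be observed straight from psql. A quick interactive check (the jobset name 'trunk' is hypothetical; the channel name is taken from the JobsetSharesChanged trigger above):

    $ psql -d hydra
    hydra=> listen jobset_shares_changed;
    hydra=> update Jobsets set schedulingShares = 200 where name = 'trunk';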
@@ -31,9 +31,6 @@ TESTS = \
 
 check_SCRIPTS = repos
 
-db.sqlite: $(top_srcdir)/src/sql/hydra-sqlite.sql
-	$(TESTS_ENVIRONMENT) $(top_srcdir)/src/script/hydra-init
-
 repos: dirs
 
 dirs:
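Since the db.sqlite rule is gone, the test suite needs a PostgreSQL database rather than a throwaway SQLite file. A plausible manual equivalent of what the old rule did, assuming a local PostgreSQL server and using the HYDRA_DBI override from Hydra::Model::DB (database name is a placeholder):

    $ createdb hydra-test
    $ HYDRA_DBI='dbi:Pg:dbname=hydra-test;' hydra-init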