Get rid of the dependency on SQLite

SQLite hasn't been properly supported by Hydra for a few years now[1],
but Hydra still depends on it. Apart from a slightly bigger closure,
this can confuse users, since Hydra picks up SQLite rather than
PostgreSQL by default if HYDRA_DBI isn't configured properly[2].

[1] 78974abb69
[2] https://logs.nix.samueldr.com/nixos-dev/2020-04-10#3297342;
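
For context, pointing Hydra at PostgreSQL explicitly means giving HYDRA_DBI a Pg DSN; a minimal sketch, with the database name and host as placeholders to adjust for your setup:

$ export HYDRA_DBI="dbi:Pg:dbname=hydra;host=localhost;"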
Maximilian Bosch 2020-04-10 18:13:18 +02:00
parent a42cf35a1c
commit efcbc08686
8 changed files with 16 additions and 64 deletions

.gitignore

@@ -32,3 +32,4 @@ Makefile.in
 hydra-config.h
 hydra-config.h.in
 result
+tests/jobs/config.nix

@@ -8,22 +8,23 @@
 * Setting the maximum number of concurrent builds per system type:
-$ sqlite3 hydra.sqlite "insert into SystemTypes(system, maxConcurrent) values('i686-linux', 3);"
+$ psql -d hydra <<< "insert into SystemTypes(system, maxConcurrent) values('i686-linux', 3);"
 * Creating a user:
-$ sqlite3 hydra.sqlite "insert into Users(userName, emailAddress, password) values('root', 'e.dolstra@tudelft.nl', '$(echo -n foobar | sha1sum | cut -c1-40)');"
+$ hydra-create-user root --email-address 'e.dolstra@tudelft.nl' \
+    --password-hash "$(echo -n foobar | sha1sum | cut -c1-40)"
 (Replace "foobar" with the desired password.)
 To make the user an admin:
-$ sqlite3 hydra.sqlite "insert into UserRoles(userName, role) values('root', 'admin');"
+$ hydra-create-user root --role admin
 To enable a non-admin user to create projects:
-$ sqlite3 hydra.sqlite "insert into UserRoles(userName, role) values('alice', 'create-projects');"
+$ hydra-create-user root --role create-projects
 * Creating a release set:
 insert into ReleaseSets(project, name) values('patchelf', 'unstable');
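
One way to sanity-check that these commands really went to PostgreSQL rather than to a leftover hydra.sqlite file (assuming a local database named hydra and peer authentication):

$ psql -d hydra -c "select userName, emailAddress from Users;"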

@@ -43,16 +43,11 @@ Identifier: patchelf
 The identifier should be a unique name (it is the primary
 database key for the project table in the database). If you try
 to create a project with an already existing identifier you'd
-get an error message such as:
-<screen>
-I'm very sorry, but an error occurred:
-DBIx::Class::ResultSet::create(): DBI Exception: DBD::SQLite::st execute failed: column name is not unique(19) at dbdimp.c line 402
-</screen>
-So try to create the project after entering just the general
-information to figure out if you have chosen a unique name.
-Job sets can be added once the project has been created.
+get an error message from the database.
+So try to create the project after entering just the general
+information to figure out if you have chosen a unique name.
+Job sets can be added once the project has been created.
 <screen>
 Display name: Patchelf

@@ -62,7 +62,6 @@
 CatalystXRoleApplicator
 CryptRandPasswd
 DBDPg
-DBDSQLite
 DataDump
 DateTime
 DigestSHA1
@@ -103,7 +102,7 @@
 src = self;
 buildInputs =
-  [ makeWrapper autoconf automake libtool unzip nukeReferences pkgconfig sqlite libpqxx
+  [ makeWrapper autoconf automake libtool unzip nukeReferences pkgconfig libpqxx
    gitAndTools.topGit mercurial darcs subversion bazaar openssl bzip2 libxslt
    perlDeps perl final.nix
    postgresql95 # for running the tests
@@ -114,7 +113,7 @@
 ];
 hydraPath = lib.makeBinPath (
-  [ sqlite subversion openssh final.nix coreutils findutils pixz
+  [ subversion openssh final.nix coreutils findutils pixz
    gzip bzip2 lzma gnutar unzip git gitAndTools.topGit mercurial darcs gnused bazaar
 ] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ] );

@@ -10,11 +10,7 @@ sub getHydraPath {
 }
 sub getHydraDBPath {
-    my $db = $ENV{"HYDRA_DBI"};
-    return $db if defined $db;
-    my $path = getHydraPath . '/hydra.sqlite';
-    #warn "The Hydra database ($path) does not exist!\n" unless -f $path;
-    return "dbi:SQLite:$path";
+    return $ENV{"HYDRA_DBI"} || "dbi:Pg:dbname=hydra;";
 }
 __PACKAGE__->config(
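
With this change an unset HYDRA_DBI falls back to "dbi:Pg:dbname=hydra;", i.e. a local PostgreSQL database named hydra reached over the default socket. A sketch of preparing such a database, assuming the PostgreSQL role for the hydra user already exists:

$ createdb hydra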

@@ -25,9 +25,8 @@ my @tables = $dbh->tables;
 if (! grep { /SchemaVersion/i } @tables) {
     print STDERR "initialising the Hydra database schema...\n";
     my $schema = read_file(
-        $dbh->{Driver}->{Name} eq 'SQLite' ? "$home/sql/hydra-sqlite.sql" :
         $dbh->{Driver}->{Name} eq 'Pg' ? "$home/sql/hydra-postgresql.sql" :
-        die "unsupported database type\n");
+        die "unsupported database type $dbh->{Driver}->{Name}\n");
     my @statements = $sql_splitter->split($schema);
     eval {
         $dbh->begin_work;
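
Initialising the schema then amounts to running hydra-init with the DSN in the environment; a sketch, assuming the hydra database from above exists and that HYDRA_DBI is honoured here as in the rest of the code base:

$ HYDRA_DBI="dbi:Pg:dbname=hydra;" hydra-init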

@@ -79,14 +79,8 @@ create table Jobsets (
     primary key (project, name),
     foreign key (project) references Projects(name) on delete cascade on update cascade,
     constraint Jobsets_id_unique UNIQUE(id)
-#ifdef SQLITE
-    ,
-    foreign key (project, name, nixExprInput) references JobsetInputs(project, jobset, name)
-#endif
 );
-#ifdef POSTGRESQL
 create function notifyJobsetSharesChanged() returns trigger as 'begin notify jobset_shares_changed; return null; end;' language plpgsql;
 create trigger JobsetSharesChanged after update on Jobsets for each row
   when (old.schedulingShares != new.schedulingShares) execute procedure notifyJobsetSharesChanged();
@@ -104,9 +98,6 @@ create trigger JobsetSchedulingChanged after update on Jobsets for each row
     or (old.enabled != new.enabled))
   execute procedure notifyJobsetSchedulingChanged();
-#endif
 create table JobsetRenames (
     project text not null,
     from_ text not null,
@@ -157,11 +148,7 @@ create table Jobs (
 create table Builds (
-#ifdef POSTGRESQL
     id serial primary key not null,
-#else
-    id integer primary key autoincrement not null,
-#endif
     finished integer not null, -- 0 = scheduled, 1 = finished
@@ -244,8 +231,6 @@ create table Builds (
 );
-#ifdef POSTGRESQL
 create function notifyBuildsDeleted() returns trigger as 'begin notify builds_deleted; return null; end;' language plpgsql;
 create trigger BuildsDeleted after delete on Builds execute procedure notifyBuildsDeleted();
@@ -261,8 +246,6 @@ create function notifyBuildBumped() returns trigger as 'begin notify builds_bump
 create trigger BuildBumped after update on Builds for each row
   when (old.globalPriority != new.globalPriority) execute procedure notifyBuildBumped();
-#endif
 create table BuildOutputs (
     build integer not null,
@@ -332,11 +315,7 @@ create table BuildStepOutputs (
 -- Inputs of builds.
 create table BuildInputs (
-#ifdef POSTGRESQL
     id serial primary key not null,
-#else
-    id integer primary key autoincrement not null,
-#endif
     -- Which build this input belongs to.
     build integer,
@@ -502,11 +481,7 @@ create table ReleaseMembers (
 create table JobsetEvals (
-#ifdef POSTGRESQL
     id serial primary key not null,
-#else
-    id integer primary key autoincrement not null,
-#endif
     project text not null,
     jobset text not null,
@@ -577,11 +552,7 @@ create table UriRevMapper (
 create table NewsItems (
-#ifdef POSTGRESQL
     id serial primary key not null,
-#else
-    id integer primary key autoincrement not null,
-#endif
     contents text not null,
     createTime integer not null,
     author text not null,
@@ -614,7 +585,6 @@ create table FailedPaths (
     path text primary key not null
 );
-#ifdef POSTGRESQL
 -- Needed because Postgres doesn't have "ignore duplicate" or upsert
 -- yet.
@@ -622,7 +592,6 @@ create rule IdempotentInsert as on insert to FailedPaths
   where exists (select 1 from FailedPaths where path = new.path)
   do instead nothing;
-#endif
 create table SystemStatus (
@@ -639,7 +608,6 @@ create table NrBuilds (
 insert into NrBuilds(what, count) values('finished', 0);
-#ifdef POSTGRESQL
 create function modifyNrBuildsFinished() returns trigger as $$
   begin
@@ -658,8 +626,6 @@ create trigger NrBuildsFinished after insert or update or delete on Builds
   for each row
   execute procedure modifyNrBuildsFinished();
-#endif
 -- Some indices.
@@ -704,7 +670,6 @@ create index IndexJobsetEvalsOnJobsetId on JobsetEvals(project, jobset, id desc)
 create index IndexBuildsOnNotificationPendingSince on Builds(notificationPendingSince) where notificationPendingSince is not null;
-#ifdef POSTGRESQL
 -- The pg_trgm extension has to be created by a superuser. The NixOS
 -- module creates this extension in the systemd prestart script. We
 -- then ensure the extension has been created before creating the
@@ -721,4 +686,3 @@ exception when others then
   raise warning 'HINT: Temporary provide superuser role to your Hydra Postgresql user and run the script src/sql/upgrade-57.sql';
   raise warning 'The pg_trgm index on builds.drvpath has been skipped (slower complex queries on builds.drvpath)';
 end$$;
-#endif
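
If the schema ever needs to be loaded by hand, the same generated file that hydra-init reads can be fed to psql directly; a sketch, with the install prefix as a placeholder:

$ psql -d hydra -f /path/to/hydra/sql/hydra-postgresql.sql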

@@ -31,9 +31,6 @@ TESTS = \
 check_SCRIPTS = repos
-db.sqlite: $(top_srcdir)/src/sql/hydra-sqlite.sql
-	$(TESTS_ENVIRONMENT) $(top_srcdir)/src/script/hydra-init
 repos: dirs
 dirs:
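
With the db.sqlite rule gone, the test suite relies on an external PostgreSQL instance (the postgresql95 build input above points the same way); a rough sketch of spinning up a throwaway server for a local run, where every path, the port, and the database name are assumptions:

$ initdb -D /tmp/hydra-test-pg
$ pg_ctl -D /tmp/hydra-test-pg -o "-k /tmp -p 5432" start
$ createdb -h /tmp -p 5432 hydra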