From de24771a8ea84a7ffe2b47cba8a613d432830604 Mon Sep 17 00:00:00 2001 From: Kevin Quick Date: Sat, 11 Jan 2020 17:01:44 -0800 Subject: [PATCH 01/52] Handle case where jobset has no defined errormsg for api/jobsets --- src/lib/Hydra/Controller/API.pm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/Hydra/Controller/API.pm b/src/lib/Hydra/Controller/API.pm index 8bb710e0..18f60eb2 100644 --- a/src/lib/Hydra/Controller/API.pm +++ b/src/lib/Hydra/Controller/API.pm @@ -88,7 +88,7 @@ sub jobsetToHash { triggertime => $jobset->triggertime, fetcherrormsg => $jobset->fetcherrormsg, errortime => $jobset->errortime, - haserrormsg => $jobset->errormsg eq "" ? JSON::false : JSON::true + haserrormsg => defined($jobset->errormsg) ? ($jobset->errormsg eq "" ? JSON::false : JSON::true) : JSON::false }; } From 08bfff9d13dff0a7ebe43d9c30c37d797e02eedb Mon Sep 17 00:00:00 2001 From: Bas van Dijk Date: Tue, 14 Jan 2020 13:34:35 +0100 Subject: [PATCH 02/52] hydra-queue-runner: sleep 5s after handling an exception instead of immediately calling `readMachinesFiles` again which could immediately throw another exception again. --- src/hydra-queue-runner/hydra-queue-runner.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index f97a1c95..99a8a811 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -218,6 +218,7 @@ void State::monitorMachinesFile() sleep(30); } catch (std::exception & e) { printMsg(lvlError, format("reloading machines file: %1%") % e.what()); + sleep(5); } } } From cdd9d6e071f2499b81f567a6376feba10b95e39c Mon Sep 17 00:00:00 2001 From: Kevin Quick Date: Mon, 20 Jan 2020 10:40:33 -0800 Subject: [PATCH 03/52] Update haserrormsg logic implementation. 
--- src/lib/Hydra/Controller/API.pm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/Hydra/Controller/API.pm b/src/lib/Hydra/Controller/API.pm index 18f60eb2..a084a3a8 100644 --- a/src/lib/Hydra/Controller/API.pm +++ b/src/lib/Hydra/Controller/API.pm @@ -88,7 +88,7 @@ sub jobsetToHash { triggertime => $jobset->triggertime, fetcherrormsg => $jobset->fetcherrormsg, errortime => $jobset->errortime, - haserrormsg => defined($jobset->errormsg) ? ($jobset->errormsg eq "" ? JSON::false : JSON::true) : JSON::false + haserrormsg => defined($jobset->errormsg) && $jobset->errormsg ne "" ? JSON::true : JSON::false }; } From bd9b656c54a33b0a4e56db82bc0d34afbb98b644 Mon Sep 17 00:00:00 2001 From: Bas van Dijk Date: Sun, 26 Jan 2020 20:15:18 +0100 Subject: [PATCH 04/52] Fix printing aggregate status `nrMembers` is undefined and it should have clearly be `nrConstituents` which is calculated just before. Fixes #693. --- src/root/build.tt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/root/build.tt b/src/root/build.tt index 750ae474..f13d35d3 100644 --- a/src/root/build.tt +++ b/src/root/build.tt @@ -186,7 +186,7 @@ END; IF b.finished && b.buildstatus != 0; nrFailedConstituents = nrFailedConstituents + 1; END; END; %]; - [%+ IF nrFinished == nrMembers && nrFailedConstituents == 0 %] + [%+ IF nrFinished == nrConstituents && nrFailedConstituents == 0 %] all [% nrConstituents %] constituent builds succeeded [% ELSE %] [% nrFailedConstituents %] out of [% nrConstituents %] constituent builds failed From 5fab5e935bdb3b6032a62577de66cc2fd07e8741 Mon Sep 17 00:00:00 2001 From: Bas van Dijk Date: Sun, 26 Jan 2020 21:45:38 +0100 Subject: [PATCH 05/52] Remove the "log diff" buttons because they're referencing the removed `logdiff` API. This API was removed in 4d1816b1529e4877f9527f039facbe9953c187bd. 
Fixes #409 --- src/root/build.tt | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/root/build.tt b/src/root/build.tt index 750ae474..8f026576 100644 --- a/src/root/build.tt +++ b/src/root/build.tt @@ -292,11 +292,9 @@ END; Last successful build [% INCLUDE renderDateTime timestamp = prevSuccessfulBuild.timestamp %] [% IF prevSuccessfulBuild && firstBrokenBuild && firstBrokenBuild.id != build.id %] First broken build [% INCLUDE renderDateTime timestamp = firstBrokenBuild.timestamp %] - log diff [% END %] This build [% INCLUDE renderDateTime timestamp = build.timestamp %] - log diff From c2f932a7e3850048498938836797ee14ec916047 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Thu, 6 Feb 2020 12:23:47 -0500 Subject: [PATCH 06/52] sql: Generate models from postgresql Lowercasing is due to postgresql not having case-sensitive table names. It always technically worked before, but those table names never existed literally. The switch to generating from postgresql is to handle an upcoming addition of an auto-incrementing ID to the Jobset table. Sqlite doesn't seem to be able to handle the table having an auto incrementing ID field which isn't the primary key, but we can't change the primary key trivially. Since hydra doesn't support sqlite and hasn't for many years anyway, it is easier to just generate from pgsql directly. 
--- src/lib/Hydra/Schema/AggregateConstituents.pm | 8 ++-- src/lib/Hydra/Schema/BuildInputs.pm | 16 +++++-- src/lib/Hydra/Schema/BuildMetrics.pm | 8 ++-- src/lib/Hydra/Schema/BuildOutputs.pm | 8 ++-- src/lib/Hydra/Schema/BuildProducts.pm | 8 ++-- src/lib/Hydra/Schema/BuildStepOutputs.pm | 8 ++-- src/lib/Hydra/Schema/BuildSteps.pm | 8 ++-- src/lib/Hydra/Schema/Builds.pm | 16 +++++-- src/lib/Hydra/Schema/CachedBazaarInputs.pm | 8 ++-- src/lib/Hydra/Schema/CachedCVSInputs.pm | 8 ++-- src/lib/Hydra/Schema/CachedDarcsInputs.pm | 8 ++-- src/lib/Hydra/Schema/CachedGitInputs.pm | 8 ++-- src/lib/Hydra/Schema/CachedHgInputs.pm | 8 ++-- src/lib/Hydra/Schema/CachedPathInputs.pm | 8 ++-- .../Hydra/Schema/CachedSubversionInputs.pm | 8 ++-- src/lib/Hydra/Schema/FailedPaths.pm | 8 ++-- src/lib/Hydra/Schema/Jobs.pm | 8 ++-- src/lib/Hydra/Schema/JobsetEvalInputs.pm | 8 ++-- src/lib/Hydra/Schema/JobsetEvalMembers.pm | 8 ++-- src/lib/Hydra/Schema/JobsetEvals.pm | 16 +++++-- src/lib/Hydra/Schema/JobsetInputAlts.pm | 8 ++-- src/lib/Hydra/Schema/JobsetInputs.pm | 27 ++--------- src/lib/Hydra/Schema/JobsetRenames.pm | 8 ++-- src/lib/Hydra/Schema/Jobsets.pm | 29 +++--------- src/lib/Hydra/Schema/NewsItems.pm | 16 +++++-- src/lib/Hydra/Schema/NrBuilds.pm | 8 ++-- src/lib/Hydra/Schema/ProjectMembers.pm | 8 ++-- src/lib/Hydra/Schema/Projects.pm | 8 ++-- src/lib/Hydra/Schema/ReleaseMembers.pm | 8 ++-- src/lib/Hydra/Schema/Releases.pm | 8 ++-- src/lib/Hydra/Schema/SchemaVersion.pm | 8 ++-- src/lib/Hydra/Schema/StarredJobs.pm | 8 ++-- src/lib/Hydra/Schema/SystemStatus.pm | 8 ++-- src/lib/Hydra/Schema/SystemTypes.pm | 8 ++-- src/lib/Hydra/Schema/UriRevMapper.pm | 8 ++-- src/lib/Hydra/Schema/UserRoles.pm | 8 ++-- src/lib/Hydra/Schema/Users.pm | 8 ++-- src/sql/Makefile.am | 10 +--- src/sql/update-dbix-harness.sh | 40 ++++++++++++++++ src/sql/update-dbix.pl | 47 +++++++++++++++++-- 40 files changed, 264 insertions(+), 201 deletions(-) create mode 100755 src/sql/update-dbix-harness.sh diff --git 
a/src/lib/Hydra/Schema/AggregateConstituents.pm b/src/lib/Hydra/Schema/AggregateConstituents.pm index 8112a49c..c4de2170 100644 --- a/src/lib/Hydra/Schema/AggregateConstituents.pm +++ b/src/lib/Hydra/Schema/AggregateConstituents.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("AggregateConstituents"); +__PACKAGE__->table("aggregateconstituents"); =head1 ACCESSORS @@ -103,8 +103,8 @@ __PACKAGE__->belongs_to( ); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-08-15 00:20:01 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:TLNenyPLIWw2gWsOVhplZw +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:bQfQoSstlaFy7zw8i1R+ow # You can replace this text with custom code or comments, and it will be preserved on regeneration diff --git a/src/lib/Hydra/Schema/BuildInputs.pm b/src/lib/Hydra/Schema/BuildInputs.pm index ce1b7320..65b936b1 100644 --- a/src/lib/Hydra/Schema/BuildInputs.pm +++ b/src/lib/Hydra/Schema/BuildInputs.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("BuildInputs"); +__PACKAGE__->table("buildinputs"); =head1 ACCESSORS @@ -40,6 +40,7 @@ __PACKAGE__->table("BuildInputs"); data_type: 'integer' is_auto_increment: 1 is_nullable: 0 + sequence: 'buildinputs_id_seq' =head2 build @@ -98,7 +99,12 @@ __PACKAGE__->table("BuildInputs"); __PACKAGE__->add_columns( "id", - { data_type => "integer", is_auto_increment => 1, is_nullable => 0 }, + { + data_type => "integer", + is_auto_increment => 1, + is_nullable => 0, + sequence => "buildinputs_id_seq", + }, "build", { data_type => "integer", is_foreign_key => 1, is_nullable => 1 }, "name", @@ -176,8 +182,8 @@ __PACKAGE__->belongs_to( ); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-10-08 13:08:15 
-# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:OaJPzRM+8XGsu3eIkqeYEw +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:/Fwb8emBsvwrZlEab2X+gQ my %hint = ( columns => [ diff --git a/src/lib/Hydra/Schema/BuildMetrics.pm b/src/lib/Hydra/Schema/BuildMetrics.pm index 58bbed94..324d69e8 100644 --- a/src/lib/Hydra/Schema/BuildMetrics.pm +++ b/src/lib/Hydra/Schema/BuildMetrics.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("BuildMetrics"); +__PACKAGE__->table("buildmetrics"); =head1 ACCESSORS @@ -177,8 +177,8 @@ __PACKAGE__->belongs_to( ); -# Created by DBIx::Class::Schema::Loader v0.07043 @ 2015-07-30 16:52:20 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:qoPm5/le+sVHigW4Dmum2Q +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Roy7h/K9u7DQOzet4B1sbA sub json_hint { return { columns => ['value', 'unit'] }; diff --git a/src/lib/Hydra/Schema/BuildOutputs.pm b/src/lib/Hydra/Schema/BuildOutputs.pm index 751eac4b..4a2b00f7 100644 --- a/src/lib/Hydra/Schema/BuildOutputs.pm +++ b/src/lib/Hydra/Schema/BuildOutputs.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("BuildOutputs"); +__PACKAGE__->table("buildoutputs"); =head1 ACCESSORS @@ -94,8 +94,8 @@ __PACKAGE__->belongs_to( ); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:V8MbzKvZNEaeHBJV67+ZMQ +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:71R9clwAP6vzDh10EukTaw my %hint = ( columns => [ diff --git a/src/lib/Hydra/Schema/BuildProducts.pm b/src/lib/Hydra/Schema/BuildProducts.pm index bba1d601..f52b2937 100644 --- a/src/lib/Hydra/Schema/BuildProducts.pm +++ b/src/lib/Hydra/Schema/BuildProducts.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("BuildProducts"); +__PACKAGE__->table("buildproducts"); =head1 ACCESSORS @@ -143,8 +143,8 @@ __PACKAGE__->belongs_to( ); -# Created by DBIx::Class::Schema::Loader v0.07043 @ 2016-04-13 14:49:33 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:kONECZn56f7sqfrLviiUOQ +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:iI0gmKqQxiPBTy5QsM6tpQ my %hint = ( columns => [ diff --git a/src/lib/Hydra/Schema/BuildStepOutputs.pm b/src/lib/Hydra/Schema/BuildStepOutputs.pm index 8eca1be3..19034c9b 100644 --- a/src/lib/Hydra/Schema/BuildStepOutputs.pm +++ b/src/lib/Hydra/Schema/BuildStepOutputs.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("BuildStepOutputs"); +__PACKAGE__->table("buildstepoutputs"); =head1 ACCESSORS @@ -119,8 +119,8 @@ __PACKAGE__->belongs_to( ); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:A/4v3ugXYbuYoKPlOvC6mg +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:Y6DpbTM6z4cOGoYIhD3i1A # You can replace this text with custom code or comments, and it will be preserved on regeneration diff --git a/src/lib/Hydra/Schema/BuildSteps.pm b/src/lib/Hydra/Schema/BuildSteps.pm index bf572fcf..dd6a4dbf 100644 --- a/src/lib/Hydra/Schema/BuildSteps.pm +++ b/src/lib/Hydra/Schema/BuildSteps.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("BuildSteps"); +__PACKAGE__->table("buildsteps"); =head1 ACCESSORS @@ -215,8 +215,8 @@ __PACKAGE__->belongs_to( ); -# Created by DBIx::Class::Schema::Loader v0.07045 @ 2016-12-07 13:48:19 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:3FYkqSUfgWmiqZzmX8J4TA +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:AMjHq4g/fSUv/lZuZOljYg my %hint = ( columns => [ diff --git a/src/lib/Hydra/Schema/Builds.pm b/src/lib/Hydra/Schema/Builds.pm index 35d9ab36..013fd09d 100644 --- a/src/lib/Hydra/Schema/Builds.pm +++ b/src/lib/Hydra/Schema/Builds.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("Builds"); +__PACKAGE__->table("builds"); =head1 ACCESSORS @@ -40,6 +40,7 @@ __PACKAGE__->table("Builds"); data_type: 'integer' is_auto_increment: 1 is_nullable: 0 + sequence: 'builds_id_seq' =head2 finished @@ -200,7 +201,12 @@ __PACKAGE__->table("Builds"); __PACKAGE__->add_columns( "id", - { data_type => "integer", is_auto_increment => 1, is_nullable => 0 }, + { + data_type => "integer", + is_auto_increment => 1, + is_nullable => 0, + sequence => "builds_id_seq", + }, "finished", { data_type => "integer", is_nullable => 0 }, "timestamp", @@ -544,8 +550,8 @@ __PACKAGE__->many_to_many( ); -# Created by DBIx::Class::Schema::Loader v0.07049 @ 2019-08-19 16:12:37 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:VjYbAQwv4THW2VfWQ5ajYQ +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:H3hs+zEywsUmwTWKfSE8wQ __PACKAGE__->has_many( "dependents", diff --git a/src/lib/Hydra/Schema/CachedBazaarInputs.pm b/src/lib/Hydra/Schema/CachedBazaarInputs.pm index f3170a9c..6d02c69c 100644 --- a/src/lib/Hydra/Schema/CachedBazaarInputs.pm +++ b/src/lib/Hydra/Schema/CachedBazaarInputs.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("CachedBazaarInputs"); +__PACKAGE__->table("cachedbazaarinputs"); =head1 ACCESSORS @@ -83,8 +83,8 @@ __PACKAGE__->add_columns( __PACKAGE__->set_primary_key("uri", "revision"); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:zvun8uhxwrr7B8EsqBoCjA +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:X8L4C57lMOctdqOKSmfA/g # You can replace this text with custom content, and it will be preserved on regeneration diff --git a/src/lib/Hydra/Schema/CachedCVSInputs.pm b/src/lib/Hydra/Schema/CachedCVSInputs.pm index 646a9f4f..8f19dd20 100644 --- a/src/lib/Hydra/Schema/CachedCVSInputs.pm +++ b/src/lib/Hydra/Schema/CachedCVSInputs.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("CachedCVSInputs"); +__PACKAGE__->table("cachedcvsinputs"); =head1 ACCESSORS @@ -99,8 +99,8 @@ __PACKAGE__->add_columns( __PACKAGE__->set_primary_key("uri", "module", "sha256hash"); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Vi1qzjW52Lnsl0JSmGzy0w +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:6eQ+i/th+oVZNRiDPd2luA # You can replace this text with custom content, and it will be preserved on regeneration 1; diff --git a/src/lib/Hydra/Schema/CachedDarcsInputs.pm b/src/lib/Hydra/Schema/CachedDarcsInputs.pm index 59488060..9d218390 100644 --- a/src/lib/Hydra/Schema/CachedDarcsInputs.pm +++ b/src/lib/Hydra/Schema/CachedDarcsInputs.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("CachedDarcsInputs"); +__PACKAGE__->table("cacheddarcsinputs"); =head1 ACCESSORS @@ -90,8 +90,8 @@ __PACKAGE__->add_columns( __PACKAGE__->set_primary_key("uri", "revision"); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-09-20 11:08:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Yl1slt3SAizijgu0KUTn0A +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Buwq42sBXQVfYUy01WMyYw # You can replace this text with custom code or comments, and it will be preserved on regeneration diff --git a/src/lib/Hydra/Schema/CachedGitInputs.pm b/src/lib/Hydra/Schema/CachedGitInputs.pm index 613ddd88..d85f4970 100644 --- a/src/lib/Hydra/Schema/CachedGitInputs.pm +++ b/src/lib/Hydra/Schema/CachedGitInputs.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("CachedGitInputs"); +__PACKAGE__->table("cachedgitinputs"); =head1 ACCESSORS @@ -92,7 +92,7 @@ __PACKAGE__->add_columns( __PACKAGE__->set_primary_key("uri", "branch", "revision"); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:I4hI02FKRMkw76WV/KBocA +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:0sdK9uQZpx869oqS5thRLw 1; diff --git a/src/lib/Hydra/Schema/CachedHgInputs.pm b/src/lib/Hydra/Schema/CachedHgInputs.pm index 3cccd818..c1d37091 100644 --- a/src/lib/Hydra/Schema/CachedHgInputs.pm +++ b/src/lib/Hydra/Schema/CachedHgInputs.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("CachedHgInputs"); +__PACKAGE__->table("cachedhginputs"); =head1 ACCESSORS @@ -92,8 +92,8 @@ __PACKAGE__->add_columns( __PACKAGE__->set_primary_key("uri", "branch", "revision"); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:qS/eiiZXmpc7KpTHdtaT7g +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:dYfjQ0SJG/mBrsZemAW3zw # You can replace this text with custom content, and it will be preserved on regeneration diff --git a/src/lib/Hydra/Schema/CachedPathInputs.pm b/src/lib/Hydra/Schema/CachedPathInputs.pm index 53716676..1657c7c1 100644 --- a/src/lib/Hydra/Schema/CachedPathInputs.pm +++ b/src/lib/Hydra/Schema/CachedPathInputs.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("CachedPathInputs"); +__PACKAGE__->table("cachedpathinputs"); =head1 ACCESSORS @@ -90,7 +90,7 @@ __PACKAGE__->add_columns( __PACKAGE__->set_primary_key("srcpath", "sha256hash"); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:28rja0vR1glJJ15hzVfjsQ +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:oV7tbWLNEMC8byKf9UnAlw 1; diff --git a/src/lib/Hydra/Schema/CachedSubversionInputs.pm b/src/lib/Hydra/Schema/CachedSubversionInputs.pm index 51dc1fa7..bf31de20 100644 --- a/src/lib/Hydra/Schema/CachedSubversionInputs.pm +++ b/src/lib/Hydra/Schema/CachedSubversionInputs.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("CachedSubversionInputs"); +__PACKAGE__->table("cachedsubversioninputs"); =head1 ACCESSORS @@ -83,7 +83,7 @@ __PACKAGE__->add_columns( __PACKAGE__->set_primary_key("uri", "revision"); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:3qXfnvkOVj25W94bfhQ65w +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:VGt/0HG84eNZr9OIA8jzow 1; diff --git a/src/lib/Hydra/Schema/FailedPaths.pm b/src/lib/Hydra/Schema/FailedPaths.pm index 082b989d..a3a49733 100644 --- a/src/lib/Hydra/Schema/FailedPaths.pm +++ b/src/lib/Hydra/Schema/FailedPaths.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("FailedPaths"); +__PACKAGE__->table("failedpaths"); =head1 ACCESSORS @@ -57,8 +57,8 @@ __PACKAGE__->add_columns("path", { data_type => "text", is_nullable => 0 }); __PACKAGE__->set_primary_key("path"); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2015-06-10 14:48:16 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:WFgjfjH+szE6Ntcicmaflw +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:jr3XiGO4lWAzqfATbsMwFw # You can replace this text with custom code or comments, and it will be preserved on regeneration diff --git a/src/lib/Hydra/Schema/Jobs.pm b/src/lib/Hydra/Schema/Jobs.pm index cd89ed3d..bdecef3a 100644 --- a/src/lib/Hydra/Schema/Jobs.pm +++ b/src/lib/Hydra/Schema/Jobs.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("Jobs"); +__PACKAGE__->table("jobs"); =head1 ACCESSORS @@ -169,7 +169,7 @@ __PACKAGE__->has_many( ); -# Created by DBIx::Class::Schema::Loader v0.07043 @ 2015-07-30 16:52:20 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:vDAo9bzLca+QWfhOb9OLMg +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:sYa6dZNK+stMAnTH0Tmn8A 1; diff --git a/src/lib/Hydra/Schema/JobsetEvalInputs.pm b/src/lib/Hydra/Schema/JobsetEvalInputs.pm index e61f0a21..ed39ad45 100644 --- a/src/lib/Hydra/Schema/JobsetEvalInputs.pm +++ b/src/lib/Hydra/Schema/JobsetEvalInputs.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("JobsetEvalInputs"); +__PACKAGE__->table("jobsetevalinputs"); =head1 ACCESSORS @@ -166,8 +166,8 @@ __PACKAGE__->belongs_to( ); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:1Dp8B58leBLh4GK0GPw2zg +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:/cFQGBLhvpmBO1UJztgIAg my %hint = ( columns => [ diff --git a/src/lib/Hydra/Schema/JobsetEvalMembers.pm b/src/lib/Hydra/Schema/JobsetEvalMembers.pm index 5f41e432..9448a203 100644 --- a/src/lib/Hydra/Schema/JobsetEvalMembers.pm +++ b/src/lib/Hydra/Schema/JobsetEvalMembers.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("JobsetEvalMembers"); +__PACKAGE__->table("jobsetevalmembers"); =head1 ACCESSORS @@ -110,8 +110,8 @@ __PACKAGE__->belongs_to( ); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:ccPNQe/QnSjTAC3uGWe8Ng +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:T+dJFh/sDO8WsasqYVLRSQ # You can replace this text with custom content, and it will be preserved on regeneration diff --git a/src/lib/Hydra/Schema/JobsetEvals.pm b/src/lib/Hydra/Schema/JobsetEvals.pm index 0bd21da2..5558b39a 100644 --- a/src/lib/Hydra/Schema/JobsetEvals.pm +++ b/src/lib/Hydra/Schema/JobsetEvals.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("JobsetEvals"); +__PACKAGE__->table("jobsetevals"); =head1 ACCESSORS @@ -40,6 +40,7 @@ __PACKAGE__->table("JobsetEvals"); data_type: 'integer' is_auto_increment: 1 is_nullable: 0 + sequence: 'jobsetevals_id_seq' =head2 project @@ -92,7 +93,12 @@ __PACKAGE__->table("JobsetEvals"); __PACKAGE__->add_columns( "id", - { data_type => "integer", is_auto_increment => 1, is_nullable => 0 }, + { + data_type => "integer", + is_auto_increment => 1, + is_nullable => 0, + sequence => "jobsetevals_id_seq", + }, "project", { data_type => "text", is_foreign_key => 1, is_nullable => 0 }, "jobset", @@ -188,8 +194,8 @@ __PACKAGE__->belongs_to( ); -# Created by 
DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:SlEiF8oN6FBK262uSiMKiw +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:4ZaT8Z1tmCCt6k4ein0MNg __PACKAGE__->has_many( "buildIds", diff --git a/src/lib/Hydra/Schema/JobsetInputAlts.pm b/src/lib/Hydra/Schema/JobsetInputAlts.pm index 3802d9f1..e8f030b3 100644 --- a/src/lib/Hydra/Schema/JobsetInputAlts.pm +++ b/src/lib/Hydra/Schema/JobsetInputAlts.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("JobsetInputAlts"); +__PACKAGE__->table("jobsetinputalts"); =head1 ACCESSORS @@ -121,7 +121,7 @@ __PACKAGE__->belongs_to( ); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:UUO37lIuEYm0GiR92m/fyA +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:nh8dQDL9FtgzXcwjDufDMQ 1; diff --git a/src/lib/Hydra/Schema/JobsetInputs.pm b/src/lib/Hydra/Schema/JobsetInputs.pm index b67a3519..d0964ab4 100644 --- a/src/lib/Hydra/Schema/JobsetInputs.pm +++ b/src/lib/Hydra/Schema/JobsetInputs.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("JobsetInputs"); +__PACKAGE__->table("jobsetinputs"); =head1 ACCESSORS @@ -130,28 +130,9 @@ __PACKAGE__->has_many( undef, ); -=head2 jobsets -Type: has_many - -Related object: L - -=cut - -__PACKAGE__->has_many( - "jobsets", - "Hydra::Schema::Jobsets", - { - "foreign.name" => "self.jobset", - "foreign.nixexprinput" => "self.name", - "foreign.project" => "self.project", - }, - undef, -); - - -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-10-08 13:06:15 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:+mZZqLjQNwblb/EWW1alLQ +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:5uKwEhDXso4IR1TFmwRxiA my %hint = ( relations => { diff --git a/src/lib/Hydra/Schema/JobsetRenames.pm b/src/lib/Hydra/Schema/JobsetRenames.pm index 874e7a24..794d6e16 100644 --- a/src/lib/Hydra/Schema/JobsetRenames.pm +++ b/src/lib/Hydra/Schema/JobsetRenames.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("JobsetRenames"); +__PACKAGE__->table("jobsetrenames"); =head1 ACCESSORS @@ -110,8 +110,8 @@ __PACKAGE__->belongs_to( ); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2014-04-23 23:13:51 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:SBpKWF9swFc9T1Uc0VFlgA +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:eOQbJ2O/p0G1317m3IC/KA # You can replace this text with custom code or comments, and it will be preserved on regeneration diff --git a/src/lib/Hydra/Schema/Jobsets.pm b/src/lib/Hydra/Schema/Jobsets.pm index 17b4ab93..a69bd599 100644 --- a/src/lib/Hydra/Schema/Jobsets.pm +++ b/src/lib/Hydra/Schema/Jobsets.pm @@ -27,18 +27,17 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("Jobsets"); +__PACKAGE__->table("jobsets"); =head1 ACCESSORS =head2 name data_type: 'text' - is_foreign_key: 1 is_nullable: 0 =head2 project @@ -55,7 +54,6 @@ __PACKAGE__->table("Jobsets"); =head2 nixexprinput data_type: 'text' - is_foreign_key: 1 is_nullable: 0 =head2 nixexprpath @@ -143,13 +141,13 @@ __PACKAGE__->table("Jobsets"); __PACKAGE__->add_columns( "name", - { data_type => "text", is_foreign_key => 1, is_nullable => 0 }, + { data_type => "text", is_nullable => 0 }, "project", { data_type => "text", is_foreign_key => 1, is_nullable => 0 }, 
"description", { data_type => "text", is_nullable => 1 }, "nixexprinput", - { data_type => "text", is_foreign_key => 1, is_nullable => 0 }, + { data_type => "text", is_nullable => 0 }, "nixexprpath", { data_type => "text", is_nullable => 0 }, "errormsg", @@ -270,21 +268,6 @@ __PACKAGE__->has_many( undef, ); -=head2 jobsetinput - -Type: belongs_to - -Related object: L - -=cut - -__PACKAGE__->belongs_to( - "jobsetinput", - "Hydra::Schema::JobsetInputs", - { jobset => "name", name => "nixexprinput", project => "project" }, - { is_deferrable => 0, on_delete => "NO ACTION", on_update => "NO ACTION" }, -); - =head2 jobsetinputs Type: has_many @@ -352,8 +335,8 @@ __PACKAGE__->has_many( ); -# Created by DBIx::Class::Schema::Loader v0.07045 @ 2017-03-09 13:03:05 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:ivYvsUyhEeaeI4EmRQ0/QQ +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:fYKx6VRlNG5XiDZ73Qr6Rw my %hint = ( columns => [ diff --git a/src/lib/Hydra/Schema/NewsItems.pm b/src/lib/Hydra/Schema/NewsItems.pm index 14d27428..0bd3f04b 100644 --- a/src/lib/Hydra/Schema/NewsItems.pm +++ b/src/lib/Hydra/Schema/NewsItems.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("NewsItems"); +__PACKAGE__->table("newsitems"); =head1 ACCESSORS @@ -40,6 +40,7 @@ __PACKAGE__->table("NewsItems"); data_type: 'integer' is_auto_increment: 1 is_nullable: 0 + sequence: 'newsitems_id_seq' =head2 contents @@ -61,7 +62,12 @@ __PACKAGE__->table("NewsItems"); __PACKAGE__->add_columns( "id", - { data_type => "integer", is_auto_increment => 1, is_nullable => 0 }, + { + data_type => "integer", + is_auto_increment => 1, + is_nullable => 0, + sequence => "newsitems_id_seq", + }, "contents", { data_type => "text", is_nullable => 0 }, "createtime", @@ -100,7 +106,7 @@ __PACKAGE__->belongs_to( ); -# Created by 
DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:3CRNsvd+YnZp9c80tuZREQ +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:r6vX8VG/+NQraIVKFgHzxQ 1; diff --git a/src/lib/Hydra/Schema/NrBuilds.pm b/src/lib/Hydra/Schema/NrBuilds.pm index 27ae2e83..7e6601b2 100644 --- a/src/lib/Hydra/Schema/NrBuilds.pm +++ b/src/lib/Hydra/Schema/NrBuilds.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("NrBuilds"); +__PACKAGE__->table("nrbuilds"); =head1 ACCESSORS @@ -67,8 +67,8 @@ __PACKAGE__->add_columns( __PACKAGE__->set_primary_key("what"); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-08-12 17:59:18 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:CK8eJGC803nGj0wnete9xg +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:qv1I8Wu7KXHAs+pyBn2ofA # You can replace this text with custom code or comments, and it will be preserved on regeneration diff --git a/src/lib/Hydra/Schema/ProjectMembers.pm b/src/lib/Hydra/Schema/ProjectMembers.pm index 62b1c542..78f02875 100644 --- a/src/lib/Hydra/Schema/ProjectMembers.pm +++ b/src/lib/Hydra/Schema/ProjectMembers.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("ProjectMembers"); +__PACKAGE__->table("projectmembers"); =head1 ACCESSORS @@ -103,8 +103,8 @@ __PACKAGE__->belongs_to( ); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:imPoiaitrTbX0vVNlF6dPA +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:e/hYmoNmcEUoGhRqtwdyQw # You can replace this text with custom content, and it will be preserved on regeneration diff --git a/src/lib/Hydra/Schema/Projects.pm b/src/lib/Hydra/Schema/Projects.pm index 11405561..038ccdc7 100644 --- a/src/lib/Hydra/Schema/Projects.pm +++ b/src/lib/Hydra/Schema/Projects.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("Projects"); +__PACKAGE__->table("projects"); =head1 ACCESSORS @@ -303,8 +303,8 @@ Composing rels: L -> username __PACKAGE__->many_to_many("usernames", "projectmembers", "username"); -# Created by DBIx::Class::Schema::Loader v0.07043 @ 2016-03-11 10:39:17 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:1ats3brIVhRTWLToIYSoaQ +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:pcF/8351zyo9VL6N5eimdQ my %hint = ( columns => [ diff --git a/src/lib/Hydra/Schema/ReleaseMembers.pm b/src/lib/Hydra/Schema/ReleaseMembers.pm index b3b0975b..6c6e8a0e 100644 --- a/src/lib/Hydra/Schema/ReleaseMembers.pm +++ b/src/lib/Hydra/Schema/ReleaseMembers.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("ReleaseMembers"); +__PACKAGE__->table("releasemembers"); =head1 ACCESSORS @@ -135,7 +135,7 @@ __PACKAGE__->belongs_to( ); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:7M7WPlGQT6rNHKJ+82/KSA +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:k4z2YeB4gRAeAP6hmR93sQ 1; diff --git a/src/lib/Hydra/Schema/Releases.pm b/src/lib/Hydra/Schema/Releases.pm index 8ff01bde..8cfb19e1 100644 --- a/src/lib/Hydra/Schema/Releases.pm +++ b/src/lib/Hydra/Schema/Releases.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("Releases"); +__PACKAGE__->table("releases"); =head1 ACCESSORS @@ -119,7 +119,7 @@ __PACKAGE__->has_many( ); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:qISBiwvboB8dIdinaE45mg +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:b4M/tHOhsy234tgTf+wqjQ 1; diff --git a/src/lib/Hydra/Schema/SchemaVersion.pm b/src/lib/Hydra/Schema/SchemaVersion.pm index 8a826e38..4165de9d 100644 --- a/src/lib/Hydra/Schema/SchemaVersion.pm +++ b/src/lib/Hydra/Schema/SchemaVersion.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("SchemaVersion"); +__PACKAGE__->table("schemaversion"); =head1 ACCESSORS @@ -45,8 +45,8 @@ __PACKAGE__->table("SchemaVersion"); __PACKAGE__->add_columns("version", { data_type => "integer", is_nullable => 0 }); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:08/7gbEQp1TqBiWFJXVY0w +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:2wy4FsRYVVo2RTCWXcmgvg # You can replace this text with custom code or comments, and it will be preserved on regeneration diff --git a/src/lib/Hydra/Schema/StarredJobs.pm b/src/lib/Hydra/Schema/StarredJobs.pm index 51bde91f..4114d65f 100644 --- a/src/lib/Hydra/Schema/StarredJobs.pm +++ b/src/lib/Hydra/Schema/StarredJobs.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("StarredJobs"); +__PACKAGE__->table("starredjobs"); =head1 ACCESSORS @@ -153,8 +153,8 @@ __PACKAGE__->belongs_to( ); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-10-14 15:46:29 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:naj5aKWuw8hLE6klmvW9Eg +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:fw4FfzmOhzDk0ZoSuNr2ww # You can replace this text with custom code or comments, and it will be preserved on regeneration diff --git a/src/lib/Hydra/Schema/SystemStatus.pm b/src/lib/Hydra/Schema/SystemStatus.pm index 7c99e780..6b794fed 100644 --- a/src/lib/Hydra/Schema/SystemStatus.pm +++ b/src/lib/Hydra/Schema/SystemStatus.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("SystemStatus"); +__PACKAGE__->table("systemstatus"); =head1 ACCESSORS @@ -67,8 +67,8 @@ __PACKAGE__->add_columns( __PACKAGE__->set_primary_key("what"); -# Created by DBIx::Class::Schema::Loader v0.07043 @ 2015-07-30 16:01:22 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:JCYi4+HwM22iucdFkhBjMg +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:GeXpTVktMXjHENa/P3qOxw # You can replace this text with custom code or comments, and it will be preserved on regeneration diff --git a/src/lib/Hydra/Schema/SystemTypes.pm b/src/lib/Hydra/Schema/SystemTypes.pm index 0d68d467..f9efd973 100644 --- a/src/lib/Hydra/Schema/SystemTypes.pm +++ b/src/lib/Hydra/Schema/SystemTypes.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("SystemTypes"); +__PACKAGE__->table("systemtypes"); =head1 ACCESSORS @@ -68,7 +68,7 @@ __PACKAGE__->add_columns( __PACKAGE__->set_primary_key("system"); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:8cC34cEw9T3+x+7uRs4KHQ +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:fYeKQQSS5J8rjO3t+Hbz0g 1; diff --git a/src/lib/Hydra/Schema/UriRevMapper.pm b/src/lib/Hydra/Schema/UriRevMapper.pm index 448015af..cd9d8567 100644 --- a/src/lib/Hydra/Schema/UriRevMapper.pm +++ b/src/lib/Hydra/Schema/UriRevMapper.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("UriRevMapper"); +__PACKAGE__->table("urirevmapper"); =head1 ACCESSORS @@ -67,8 +67,8 @@ __PACKAGE__->add_columns( __PACKAGE__->set_primary_key("baseuri"); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:G2GAF/Rb7cRkRegH94LwIA +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:FOg2/BVJK3yg8MAYMrqZOQ # You can replace this text with custom content, and it will be preserved on regeneration diff --git a/src/lib/Hydra/Schema/UserRoles.pm b/src/lib/Hydra/Schema/UserRoles.pm index 2644da7a..a7ce5ed9 100644 --- a/src/lib/Hydra/Schema/UserRoles.pm +++ b/src/lib/Hydra/Schema/UserRoles.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("UserRoles"); +__PACKAGE__->table("userroles"); =head1 ACCESSORS @@ -87,7 +87,7 @@ __PACKAGE__->belongs_to( ); -# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:aS+ivlFpndqIv8U578zz9A +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:LUw2PDFvUHs0E0UZ3oHFxw 1; diff --git a/src/lib/Hydra/Schema/Users.pm b/src/lib/Hydra/Schema/Users.pm index 43842fb9..7789b42c 100644 --- a/src/lib/Hydra/Schema/Users.pm +++ b/src/lib/Hydra/Schema/Users.pm @@ -27,11 +27,11 @@ use base 'DBIx::Class::Core'; __PACKAGE__->load_components("+Hydra::Component::ToJSON"); -=head1 TABLE: C +=head1 TABLE: C =cut -__PACKAGE__->table("Users"); +__PACKAGE__->table("users"); =head1 ACCESSORS @@ -192,8 +192,8 @@ Composing rels: L -> project __PACKAGE__->many_to_many("projects", "projectmembers", "project"); -# Created by DBIx::Class::Schema::Loader v0.07043 @ 2016-05-27 11:32:14 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Az1+V+ztJoWUt50NLQR3xg +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:4/WZ95asbnGmK+nEHb4sLQ my %hint = ( columns => [ diff --git a/src/sql/Makefile.am b/src/sql/Makefile.am index 85613a5b..cd4bfb9b 100644 --- a/src/sql/Makefile.am +++ b/src/sql/Makefile.am @@ -2,7 +2,6 @@ sqldir = $(libexecdir)/hydra/sql nobase_dist_sql_DATA = \ hydra-postgresql.sql \ hydra.sql \ - hydra-sqlite.sql \ test.sql \ upgrade-*.sql \ update-dbix.pl @@ -10,10 +9,5 @@ nobase_dist_sql_DATA = \ hydra-postgresql.sql: hydra.sql cpp -P -E -traditional-cpp -DPOSTGRESQL hydra.sql > $@ || rm -f $@ -hydra-sqlite.sql: hydra.sql - cpp -P -E -traditional-cpp -DSQLITE hydra.sql > $@ || rm -f $@ - -update-dbix: hydra-sqlite.sql - rm -f tmp.sqlite - sqlite3 tmp.sqlite < hydra-sqlite.sql - perl -I ../lib -MDBIx::Class::Schema::Loader=make_schema_at,dump_to_dir:../lib update-dbix.pl +update-dbix: hydra-postgresql.sql + ./update-dbix-harness.sh diff --git a/src/sql/update-dbix-harness.sh b/src/sql/update-dbix-harness.sh new file mode 100755 index 00000000..d76e9e91 --- /dev/null +++ b/src/sql/update-dbix-harness.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +readonly scratch=$(mktemp -d -t tmp.XXXXXXXXXX) + +readonly socket=$scratch/socket +readonly data=$scratch/data +readonly dbname=hydra-update-dbix + +function finish { + set +e + pg_ctl -D "$data" \ + -o "-F -h '' -k \"$socket\"" \ + -w stop -m immediate + + if [ -f "$data/postmaster.pid" ]; then + pg_ctl -D "$data" \ + -o "-F -h '' -k \"$socket\"" \ + -w kill TERM "$(cat "$data/postmaster.pid")" + fi + + rm -rf "$scratch" +} +trap finish EXIT + +set -e + +mkdir -p "$socket" +initdb -D "$data" + +pg_ctl -D "$data" \ + -o "-F -h '' -k \"${socket}\"" \ + -w start + +createdb -h "$socket" "$dbname" + +psql -h "$socket" "$dbname" -f ./hydra-postgresql.sql + +perl -I ../lib \ + -MDBIx::Class::Schema::Loader=make_schema_at,dump_to_dir:../lib \ + update-dbix.pl "dbi:Pg:dbname=$dbname;host=$socket" diff --git a/src/sql/update-dbix.pl b/src/sql/update-dbix.pl index 9089617a..5dc2912d 100644 --- a/src/sql/update-dbix.pl +++ 
b/src/sql/update-dbix.pl @@ -1,8 +1,49 @@ +use Cwd; + +die "$0: dbi connection string required \n" if scalar @ARGV != 1; + make_schema_at("Hydra::Schema", { naming => { ALL => "v5" }, relationships => 1, - moniker_map => sub { return "$_"; }, + moniker_map => { + "aggregateconstituents" => "AggregateConstituents", + "buildinputs" => "BuildInputs", + "buildmetrics" => "BuildMetrics", + "buildoutputs" => "BuildOutputs", + "buildproducts" => "BuildProducts", + "builds" => "Builds", + "buildstepoutputs" => "BuildStepOutputs", + "buildsteps" => "BuildSteps", + "cachedbazaarinputs" => "CachedBazaarInputs", + "cachedcvsinputs" => "CachedCVSInputs", + "cacheddarcsinputs" => "CachedDarcsInputs", + "cachedgitinputs" => "CachedGitInputs", + "cachedhginputs" => "CachedHgInputs", + "cachedpathinputs" => "CachedPathInputs", + "cachedsubversioninputs" => "CachedSubversionInputs", + "failedpaths" => "FailedPaths", + "jobs" => "Jobs", + "jobsetevalinputs" => "JobsetEvalInputs", + "jobsetevalmembers" => "JobsetEvalMembers", + "jobsetevals" => "JobsetEvals", + "jobsetinputalts" => "JobsetInputAlts", + "jobsetinputs" => "JobsetInputs", + "jobsetrenames" => "JobsetRenames", + "jobsets" => "Jobsets", + "newsitems" => "NewsItems", + "nrbuilds" => "NrBuilds", + "projectmembers" => "ProjectMembers", + "projects" => "Projects", + "releasemembers" => "ReleaseMembers", + "releases" => "Releases", + "schemaversion" => "SchemaVersion", + "starredjobs" => "StarredJobs", + "systemstatus" => "SystemStatus", + "systemtypes" => "SystemTypes", + "urirevmapper" => "UriRevMapper", + "userroles" => "UserRoles", + "users" => "Users", + } , #sub { return "$_"; }, components => [ "+Hydra::Component::ToJSON" ], rel_name_map => { buildsteps_builds => "buildsteps" } -}, ["dbi:SQLite:tmp.sqlite"]); - +}, [$ARGV[0]]); From 6fe57ab5fad9f2728f9d6de6b296fe8187a6bd72 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Sun, 9 Feb 2020 15:17:24 -0500 Subject: [PATCH 07/52] Copy the flake migration from the flake 
branch hydra.nixos.org is already running this rev, and it should be safe to apply to everyone else. If we make changes to this migration, we'll need to write another migration anyway. --- src/lib/Hydra/Schema/JobsetEvals.pm | 11 +++++++++-- src/lib/Hydra/Schema/Jobsets.pm | 27 +++++++++++++++++++++------ src/sql/hydra.sql | 13 ++++++++++--- src/sql/upgrade-58.sql | 7 +++++++ 4 files changed, 47 insertions(+), 11 deletions(-) create mode 100644 src/sql/upgrade-58.sql diff --git a/src/lib/Hydra/Schema/JobsetEvals.pm b/src/lib/Hydra/Schema/JobsetEvals.pm index 5558b39a..7e88c25e 100644 --- a/src/lib/Hydra/Schema/JobsetEvals.pm +++ b/src/lib/Hydra/Schema/JobsetEvals.pm @@ -89,6 +89,11 @@ __PACKAGE__->table("jobsetevals"); data_type: 'integer' is_nullable: 1 +=head2 flake + + data_type: 'text' + is_nullable: 1 + =cut __PACKAGE__->add_columns( @@ -117,6 +122,8 @@ __PACKAGE__->add_columns( { data_type => "integer", is_nullable => 1 }, "nrsucceeded", { data_type => "integer", is_nullable => 1 }, + "flake", + { data_type => "text", is_nullable => 1 }, ); =head1 PRIMARY KEY @@ -194,8 +201,8 @@ __PACKAGE__->belongs_to( ); -# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:4ZaT8Z1tmCCt6k4ein0MNg +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-09 15:21:11 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:Ar6GRni8AcAQmuZyg6tFKw __PACKAGE__->has_many( "buildIds", diff --git a/src/lib/Hydra/Schema/Jobsets.pm b/src/lib/Hydra/Schema/Jobsets.pm index a69bd599..9e9d4773 100644 --- a/src/lib/Hydra/Schema/Jobsets.pm +++ b/src/lib/Hydra/Schema/Jobsets.pm @@ -54,12 +54,12 @@ __PACKAGE__->table("jobsets"); =head2 nixexprinput data_type: 'text' - is_nullable: 0 + is_nullable: 1 =head2 nixexprpath data_type: 'text' - is_nullable: 0 + is_nullable: 1 =head2 errormsg @@ -137,6 +137,17 @@ __PACKAGE__->table("jobsets"); data_type: 'integer' is_nullable: 1 +=head2 type + + data_type: 'integer' + default_value: 0 + is_nullable: 0 + +=head2 flake + + data_type: 'text' + is_nullable: 1 + =cut __PACKAGE__->add_columns( @@ -147,9 +158,9 @@ __PACKAGE__->add_columns( "description", { data_type => "text", is_nullable => 1 }, "nixexprinput", - { data_type => "text", is_nullable => 0 }, + { data_type => "text", is_nullable => 1 }, "nixexprpath", - { data_type => "text", is_nullable => 0 }, + { data_type => "text", is_nullable => 1 }, "errormsg", { data_type => "text", is_nullable => 1 }, "errortime", @@ -178,6 +189,10 @@ __PACKAGE__->add_columns( { data_type => "boolean", is_nullable => 1 }, "starttime", { data_type => "integer", is_nullable => 1 }, + "type", + { data_type => "integer", default_value => 0, is_nullable => 0 }, + "flake", + { data_type => "text", is_nullable => 1 }, ); =head1 PRIMARY KEY @@ -335,8 +350,8 @@ __PACKAGE__->has_many( ); -# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:fYKx6VRlNG5XiDZ73Qr6Rw +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-09 15:21:11 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:FVP1/AWjdKTlY6djrG592A my %hint = ( columns => [ diff --git a/src/sql/hydra.sql b/src/sql/hydra.sql index 0c769a7e..33ecab4d 100644 --- a/src/sql/hydra.sql +++ b/src/sql/hydra.sql @@ -54,8 +54,8 @@ create table Jobsets ( name text not null, project text not null, description text, - nixExprInput text not null, -- name of the jobsetInput containing the Nix or Guix expression - nixExprPath text not null, -- relative path of the Nix or Guix expression + nixExprInput text, -- name of the jobsetInput containing the Nix or Guix expression + nixExprPath text, -- relative path of the Nix or Guix expression errorMsg text, -- used to signal the last evaluation error etc. for this jobset errorTime integer, -- timestamp associated with errorMsg lastCheckedTime integer, -- last time the evaluator looked at this jobset @@ -70,7 +70,11 @@ create table Jobsets ( fetchErrorMsg text, forceEval boolean, startTime integer, -- if jobset is currently running + type integer not null default 0, -- 0 == legacy, 1 == flake + flake text, check (schedulingShares > 0), + check ((type = 0) = (nixExprInput is not null and nixExprPath is not null)), + check ((type = 1) = (flake is not null)), primary key (project, name), foreign key (project) references Projects(name) on delete cascade on update cascade #ifdef SQLITE @@ -181,7 +185,8 @@ create table Builds ( -- Copy of the nixExprInput/nixExprPath fields of the jobset that -- instantiated this build. Needed if we want to reproduce this - -- build. + -- build. FIXME: this should be stored in JobsetEvals, storing it + -- here is denormal. 
nixExprInput text, nixExprPath text, @@ -522,6 +527,8 @@ create table JobsetEvals ( nrBuilds integer, nrSucceeded integer, -- set lazily when all builds are finished + flake text, -- immutable flake reference + foreign key (project) references Projects(name) on delete cascade on update cascade, foreign key (project, jobset) references Jobsets(project, name) on delete cascade on update cascade ); diff --git a/src/sql/upgrade-58.sql b/src/sql/upgrade-58.sql new file mode 100644 index 00000000..b59ee949 --- /dev/null +++ b/src/sql/upgrade-58.sql @@ -0,0 +1,7 @@ +alter table Jobsets alter column nixExprInput drop not null; +alter table Jobsets alter column nixExprPath drop not null; +alter table Jobsets add column type integer default 0; +alter table Jobsets add column flake text; +alter table Jobsets add check ((type = 0) = (nixExprInput is not null and nixExprPath is not null)); +alter table Jobsets add check ((type = 1) = (flake is not null)); +alter table JobsetEvals add column flake text; From e00030563b092361eac2c4571a3e86453566b28f Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 5 Feb 2020 14:06:21 -0500 Subject: [PATCH 08/52] Jobsets: add a SERIAL, unique, non-null id column A postgresql column which is non-null and unique is treated with the same optimisations as a primary key, so we have no need to try and recreate the `id` as the primary key. No read paths are impacted by this change, and the database will automatically create an ID for each insert. Thus, no code needs to change. 
--- src/lib/Hydra/Schema/Jobsets.pm | 36 +++++++++++++++++++++++++++++++-- src/sql/hydra.sql | 4 +++- src/sql/upgrade-59.sql | 4 ++++ 3 files changed, 41 insertions(+), 3 deletions(-) create mode 100644 src/sql/upgrade-59.sql diff --git a/src/lib/Hydra/Schema/Jobsets.pm b/src/lib/Hydra/Schema/Jobsets.pm index 9e9d4773..2615ef87 100644 --- a/src/lib/Hydra/Schema/Jobsets.pm +++ b/src/lib/Hydra/Schema/Jobsets.pm @@ -40,6 +40,13 @@ __PACKAGE__->table("jobsets"); data_type: 'text' is_nullable: 0 +=head2 id + + data_type: 'integer' + is_auto_increment: 1 + is_nullable: 0 + sequence: 'jobsets_id_seq' + =head2 project data_type: 'text' @@ -153,6 +160,13 @@ __PACKAGE__->table("jobsets"); __PACKAGE__->add_columns( "name", { data_type => "text", is_nullable => 0 }, + "id", + { + data_type => "integer", + is_auto_increment => 1, + is_nullable => 0, + sequence => "jobsets_id_seq", + }, "project", { data_type => "text", is_foreign_key => 1, is_nullable => 0 }, "description", @@ -209,6 +223,20 @@ __PACKAGE__->add_columns( __PACKAGE__->set_primary_key("project", "name"); +=head1 UNIQUE CONSTRAINTS + +=head2 C + +=over 4 + +=item * L + +=back + +=cut + +__PACKAGE__->add_unique_constraint("jobsets_id_unique", ["id"]); + =head1 RELATIONS =head2 buildmetrics @@ -350,8 +378,12 @@ __PACKAGE__->has_many( ); -# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-09 15:21:11 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:FVP1/AWjdKTlY6djrG592A +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-09 15:25:17 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:1CMOaLf9fYRdJBlYiivmxA + +__PACKAGE__->add_column( + "+id" => { retrieve_on_insert => 1 } +); my %hint = ( columns => [ diff --git a/src/sql/hydra.sql b/src/sql/hydra.sql index 33ecab4d..6b22121c 100644 --- a/src/sql/hydra.sql +++ b/src/sql/hydra.sql @@ -52,6 +52,7 @@ create table ProjectMembers ( -- describing build jobs. 
create table Jobsets ( name text not null, + id serial not null, project text not null, description text, nixExprInput text, -- name of the jobsetInput containing the Nix or Guix expression @@ -76,7 +77,8 @@ create table Jobsets ( check ((type = 0) = (nixExprInput is not null and nixExprPath is not null)), check ((type = 1) = (flake is not null)), primary key (project, name), - foreign key (project) references Projects(name) on delete cascade on update cascade + foreign key (project) references Projects(name) on delete cascade on update cascade, + constraint Jobsets_id_unique UNIQUE(id) #ifdef SQLITE , foreign key (project, name, nixExprInput) references JobsetInputs(project, jobset, name) diff --git a/src/sql/upgrade-59.sql b/src/sql/upgrade-59.sql new file mode 100644 index 00000000..d0a28971 --- /dev/null +++ b/src/sql/upgrade-59.sql @@ -0,0 +1,4 @@ +-- will automatically add unique IDs to Jobsets. +ALTER TABLE Jobsets + ADD COLUMN id SERIAL NOT NULL, + ADD CONSTRAINT Jobsets_id_unique UNIQUE (id); From efa1f1d4fbdc468c61f7403e8709d0295e052c7f Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 5 Feb 2020 17:10:50 -0500 Subject: [PATCH 09/52] Jobs: add a nullable jobset_id foreign key to Jobsets. Also, adds an explicitly named "jobs" accessor to the Jobsets Schema object, which uses the project/jobset name. 
--- src/lib/Hydra/Schema/Jobs.pm | 32 +++++++++++++++++++++++-- src/lib/Hydra/Schema/Jobsets.pm | 41 +++++++++++++++++++++++++++++---- src/sql/hydra.sql | 2 ++ src/sql/upgrade-60.sql | 10 ++++++++ 4 files changed, 79 insertions(+), 6 deletions(-) create mode 100644 src/sql/upgrade-60.sql diff --git a/src/lib/Hydra/Schema/Jobs.pm b/src/lib/Hydra/Schema/Jobs.pm index bdecef3a..d4126e32 100644 --- a/src/lib/Hydra/Schema/Jobs.pm +++ b/src/lib/Hydra/Schema/Jobs.pm @@ -47,6 +47,12 @@ __PACKAGE__->table("jobs"); is_foreign_key: 1 is_nullable: 0 +=head2 jobset_id + + data_type: 'integer' + is_foreign_key: 1 + is_nullable: 1 + =head2 name data_type: 'text' @@ -59,6 +65,8 @@ __PACKAGE__->add_columns( { data_type => "text", is_foreign_key => 1, is_nullable => 0 }, "jobset", { data_type => "text", is_foreign_key => 1, is_nullable => 0 }, + "jobset_id", + { data_type => "integer", is_foreign_key => 1, is_nullable => 1 }, "name", { data_type => "text", is_nullable => 0 }, ); @@ -130,6 +138,26 @@ Related object: L __PACKAGE__->belongs_to( "jobset", "Hydra::Schema::Jobsets", + { id => "jobset_id" }, + { + is_deferrable => 0, + join_type => "LEFT", + on_delete => "CASCADE", + on_update => "NO ACTION", + }, +); + +=head2 jobset_project_jobset + +Type: belongs_to + +Related object: L + +=cut + +__PACKAGE__->belongs_to( + "jobset_project_jobset", + "Hydra::Schema::Jobsets", { name => "jobset", project => "project" }, { is_deferrable => 0, on_delete => "CASCADE", on_update => "CASCADE" }, ); @@ -169,7 +197,7 @@ __PACKAGE__->has_many( ); -# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:sYa6dZNK+stMAnTH0Tmn8A +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:30:58 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:dFusVjxb423gIEoadAw9sw 1; diff --git a/src/lib/Hydra/Schema/Jobsets.pm b/src/lib/Hydra/Schema/Jobsets.pm index 2615ef87..60efa962 100644 --- a/src/lib/Hydra/Schema/Jobsets.pm +++ b/src/lib/Hydra/Schema/Jobsets.pm @@ -275,7 +275,7 @@ __PACKAGE__->has_many( undef, ); -=head2 jobs +=head2 jobs_jobset_ids Type: has_many @@ -284,7 +284,22 @@ Related object: L =cut __PACKAGE__->has_many( - "jobs", + "jobs_jobset_ids", + "Hydra::Schema::Jobs", + { "foreign.jobset_id" => "self.id" }, + undef, +); + +=head2 jobs_project_jobsets + +Type: has_many + +Related object: L + +=cut + +__PACKAGE__->has_many( + "jobs_project_jobsets", "Hydra::Schema::Jobs", { "foreign.jobset" => "self.name", @@ -378,8 +393,26 @@ __PACKAGE__->has_many( ); -# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-09 15:25:17 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:1CMOaLf9fYRdJBlYiivmxA +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-09 15:26:15 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:DH1jX0smo2rFvyr4V+qJcw + +=head2 jobs + +Type: has_many + +Related object: L + +=cut + +__PACKAGE__->has_many( + "jobs", + "Hydra::Schema::Jobs", + { + "foreign.jobset" => "self.name", + "foreign.project" => "self.project", + }, + undef, +); __PACKAGE__->add_column( "+id" => { retrieve_on_insert => 1 } diff --git a/src/sql/hydra.sql b/src/sql/hydra.sql index 6b22121c..77b5822a 100644 --- a/src/sql/hydra.sql +++ b/src/sql/hydra.sql @@ -146,9 +146,11 @@ create table JobsetInputAlts ( create table Jobs ( project text not null, jobset text not null, + jobset_id integer null, name text not null, primary key (project, jobset, name), + foreign key (jobset_id) references Jobsets(id) on delete cascade, foreign key (project) references Projects(name) on delete cascade on update cascade, foreign key (project, jobset) references Jobsets(project, name) on delete cascade on update cascade ); diff --git a/src/sql/upgrade-60.sql b/src/sql/upgrade-60.sql new file mode 100644 index 00000000..3cdb00c3 
--- /dev/null +++ b/src/sql/upgrade-60.sql @@ -0,0 +1,10 @@ +-- Add the jobset_id columns to the Jobs table. This will go +-- quickly, since the field is nullable. Note this is just part one of +-- this migration. Future steps involve a piecemeal backfilling, and +-- then making the column non-null. + +ALTER TABLE Jobs + ADD COLUMN jobset_id integer NULL, + ADD FOREIGN KEY (jobset_id) + REFERENCES Jobsets(id) + ON DELETE CASCADE; From 624f1d8d2d3641694acef83367a5c557cfe9bfde Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 5 Feb 2020 17:30:11 -0500 Subject: [PATCH 10/52] Jobs: populate Jobs.jobset_id field when writing from hydra-eval-jobset --- src/script/hydra-eval-jobset | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/script/hydra-eval-jobset b/src/script/hydra-eval-jobset index fcc60a6c..5e68f124 100755 --- a/src/script/hydra-eval-jobset +++ b/src/script/hydra-eval-jobset @@ -417,7 +417,12 @@ sub checkBuild { my $build; txn_do($db, sub { - my $job = $jobset->jobs->update_or_create({ name => $jobName }); + my $job = $jobset->jobs->update_or_create({ + name => $jobName, + jobset_id => $jobset->id, + project => $jobset->project, + jobset => $jobset->name, + }); # Don't add a build that has already been scheduled for this # job, or has been built but is still a "current" build for From ddf00fa6278532c9cce6bfeceeb1ee3d9c3e45cd Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 5 Feb 2020 15:11:56 -0500 Subject: [PATCH 11/52] Builds: add a nullable jobset_id foreign key to Jobsets. Also, adds an explicitly named "builds" accessor to the Jobsets Schema object, which uses the project/jobset name. 
--- src/lib/Hydra/Schema/Builds.pm | 32 +++++++++++++++++++++++-- src/lib/Hydra/Schema/Jobsets.pm | 42 +++++++++++++++++++++++++++++---- src/sql/hydra.sql | 2 ++ src/sql/upgrade-61.sql | 10 ++++++++ 4 files changed, 80 insertions(+), 6 deletions(-) create mode 100644 src/sql/upgrade-61.sql diff --git a/src/lib/Hydra/Schema/Builds.pm b/src/lib/Hydra/Schema/Builds.pm index 013fd09d..53454867 100644 --- a/src/lib/Hydra/Schema/Builds.pm +++ b/src/lib/Hydra/Schema/Builds.pm @@ -64,6 +64,12 @@ __PACKAGE__->table("builds"); is_foreign_key: 1 is_nullable: 0 +=head2 jobset_id + + data_type: 'integer' + is_foreign_key: 1 + is_nullable: 1 + =head2 job data_type: 'text' @@ -215,6 +221,8 @@ __PACKAGE__->add_columns( { data_type => "text", is_foreign_key => 1, is_nullable => 0 }, "jobset", { data_type => "text", is_foreign_key => 1, is_nullable => 0 }, + "jobset_id", + { data_type => "integer", is_foreign_key => 1, is_nullable => 1 }, "job", { data_type => "text", is_foreign_key => 1, is_nullable => 0 }, "nixname", @@ -457,6 +465,26 @@ Related object: L __PACKAGE__->belongs_to( "jobset", "Hydra::Schema::Jobsets", + { id => "jobset_id" }, + { + is_deferrable => 0, + join_type => "LEFT", + on_delete => "CASCADE", + on_update => "NO ACTION", + }, +); + +=head2 jobset_project_jobset + +Type: belongs_to + +Related object: L + +=cut + +__PACKAGE__->belongs_to( + "jobset_project_jobset", + "Hydra::Schema::Jobsets", { name => "jobset", project => "project" }, { is_deferrable => 0, on_delete => "NO ACTION", on_update => "CASCADE" }, ); @@ -550,8 +578,8 @@ __PACKAGE__->many_to_many( ); -# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:H3hs+zEywsUmwTWKfSE8wQ +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:32:28 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:RvrINOAowDcde8Nd9VD6rQ __PACKAGE__->has_many( "dependents", diff --git a/src/lib/Hydra/Schema/Jobsets.pm b/src/lib/Hydra/Schema/Jobsets.pm index 60efa962..fbbb253c 100644 --- a/src/lib/Hydra/Schema/Jobsets.pm +++ b/src/lib/Hydra/Schema/Jobsets.pm @@ -257,7 +257,7 @@ __PACKAGE__->has_many( undef, ); -=head2 builds +=head2 builds_jobset_ids Type: has_many @@ -266,7 +266,22 @@ Related object: L =cut __PACKAGE__->has_many( - "builds", + "builds_jobset_ids", + "Hydra::Schema::Builds", + { "foreign.jobset_id" => "self.id" }, + undef, +); + +=head2 builds_project_jobsets + +Type: has_many + +Related object: L + +=cut + +__PACKAGE__->has_many( + "builds_project_jobsets", "Hydra::Schema::Builds", { "foreign.jobset" => "self.name", @@ -393,8 +408,27 @@ __PACKAGE__->has_many( ); -# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-09 15:26:15 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:DH1jX0smo2rFvyr4V+qJcw +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-09 15:32:17 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:P8+t7rgpOqkGwRdM2b+3Bw + + +=head2 builds + +Type: has_many + +Related object: L + +=cut + +__PACKAGE__->has_many( + "builds", + "Hydra::Schema::Builds", + { + "foreign.jobset" => "self.name", + "foreign.project" => "self.project", + }, + undef, +); =head2 jobs diff --git a/src/sql/hydra.sql b/src/sql/hydra.sql index 77b5822a..8349e3be 100644 --- a/src/sql/hydra.sql +++ b/src/sql/hydra.sql @@ -170,6 +170,7 @@ create table Builds ( -- Info about the inputs. project text not null, jobset text not null, + jobset_id integer null, job text not null, -- Info about the build result. 
@@ -236,6 +237,7 @@ create table Builds ( check (finished = 0 or (stoptime is not null and stoptime != 0)), check (finished = 0 or (starttime is not null and starttime != 0)), + foreign key (jobset_id) references Jobsets(id) on delete cascade, foreign key (project) references Projects(name) on update cascade, foreign key (project, jobset) references Jobsets(project, name) on update cascade, foreign key (project, jobset, job) references Jobs(project, jobset, name) on update cascade diff --git a/src/sql/upgrade-61.sql b/src/sql/upgrade-61.sql new file mode 100644 index 00000000..bfa6b8b3 --- /dev/null +++ b/src/sql/upgrade-61.sql @@ -0,0 +1,10 @@ +-- Add the jobset_id columns to the Builds table. This will go +-- quickly, since the field is nullable. Note this is just part one of +-- this migration. Future steps involve a piecemeal backfilling, and +-- then making the column non-null. + +ALTER TABLE Builds + ADD COLUMN jobset_id integer NULL, + ADD FOREIGN KEY (jobset_id) + REFERENCES Jobsets(id) + ON DELETE CASCADE; From f3a561aecd4469e456bd214f4f3f6648ce46d916 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 5 Feb 2020 16:24:43 -0500 Subject: [PATCH 12/52] Builds: populate Builds.jobset_id in hydra-eval-jobset --- src/script/hydra-eval-jobset | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/script/hydra-eval-jobset b/src/script/hydra-eval-jobset index 5e68f124..d0cc3e8a 100755 --- a/src/script/hydra-eval-jobset +++ b/src/script/hydra-eval-jobset @@ -469,6 +469,9 @@ sub checkBuild { # Add the build to the database. 
$build = $job->builds->create( { timestamp => $time + , project => $jobset->project + , jobset => $jobset->name + , jobset_id => $jobset->id , description => null($buildInfo->{description}) , license => null($buildInfo->{license}) , homepage => null($buildInfo->{homepage}) From f69260118bf7e78a4b7830ea58e2f0ba8c85d6b7 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 5 Feb 2020 21:12:49 -0500 Subject: [PATCH 13/52] hydra-backfill-ids: create to add jobset_id values to Builds and Jobs Vacuum every 10 iterations, update 10k at a time. --- src/script/Makefile.am | 1 + src/script/hydra-backfill-ids | 164 ++++++++++++++++++++++++++++++++++ 2 files changed, 165 insertions(+) create mode 100755 src/script/hydra-backfill-ids diff --git a/src/script/Makefile.am b/src/script/Makefile.am index 5852bc85..9deb6f29 100644 --- a/src/script/Makefile.am +++ b/src/script/Makefile.am @@ -3,6 +3,7 @@ EXTRA_DIST = \ hydra-eval-guile-jobs.in distributable_scripts = \ + hydra-backfill-ids \ hydra-init \ hydra-eval-jobset \ hydra-server \ diff --git a/src/script/hydra-backfill-ids b/src/script/hydra-backfill-ids new file mode 100755 index 00000000..d9fc362a --- /dev/null +++ b/src/script/hydra-backfill-ids @@ -0,0 +1,164 @@ +#! /usr/bin/env perl + +use strict; +use utf8; +use Hydra::Model::DB; + +STDOUT->autoflush(); +STDERR->autoflush(1); +binmode STDERR, ":encoding(utf8)"; + +my $db = Hydra::Model::DB->new(); +my $vacuum = $db->storage->dbh->prepare("VACUUM;"); + +my $dryRun = defined $ENV{'HYDRA_DRY_RUN'}; + +my $batchSize = 10000; +my $iterationsPerVacuum = 500; + +sub backfillJobsJobsetId { + my ($skipLocked) = @_; + my $logPrefix; + + if ($skipLocked) { + $logPrefix = "(pass 1/2)"; + } else { + $logPrefix = "(pass 2/2)"; + } + + print STDERR "$logPrefix Backfilling Jobs records where jobset_id is NULL...\n"; + + my $totalToGoSth = $db->storage->dbh->prepare(<execute(); + my ($totalToGo) = $totalToGoSth->fetchrow_array; + + my $skipLockedStmt = $skipLocked ? 
"FOR UPDATE SKIP LOCKED" : ""; + my $update10kJobs = $db->storage->dbh->prepare(<execute($batchSize); + print STDERR "$logPrefix (batch #$iteration; $totalToGo remaining) Jobs.jobset_id: affected $affected rows...\n"; + $totalToGo -= $affected; + + if ($iteration % $iterationsPerVacuum == 0) { + print STDERR "$logPrefix (batch #$iteration) Vacuuming...\n"; + $vacuum->execute(); + } + } while ($affected > 0); + + + if ($skipLocked) { + backfillJobsJobsetId(0); + } +} + + +sub backfillBuildsJobsetId { + my ($skipLocked) = @_; + my $logPrefix; + + if ($skipLocked) { + $logPrefix = "(pass 1/2)"; + print STDERR "$logPrefix Backfilling unlocked Builds records where jobset_id is NULL...\n"; + } else { + $logPrefix = "(pass 2/2)"; + print STDERR "$logPrefix Backfilling all Builds records where jobset_id is NULL...\n"; + } + + my $skipLockedStmt = $skipLocked ? "FOR UPDATE SKIP LOCKED" : ""; + my $update10kBuilds = $db->storage->dbh->prepare(<<"QUERY"); +WITH updateprogress AS ( + UPDATE builds + SET jobset_id = ( + SELECT jobsets.id + FROM jobsets + WHERE jobsets.name = builds.jobset + AND jobsets.project = builds.project + ) + WHERE builds.id in ( + SELECT buildprime.id + FROM builds buildprime + WHERE buildprime.jobset_id IS NULL + AND buildprime.id >= ? + ORDER BY buildprime.id + $skipLockedStmt + LIMIT ? + ) + RETURNING id +) +SELECT + count(*) AS affected, + max(updateprogress.id) AS highest_id +FROM updateprogress; + +QUERY + + my $lowestNullIdSth = $db->storage->dbh->prepare(<execute(); + my ($highestId) = $lowestNullIdSth->fetchrow_array; + + my $totalToGoSth = $db->storage->dbh->prepare(<= ? 
+QUERY + $totalToGoSth->execute($highestId); + my ($totalToGo) = $totalToGoSth->fetchrow_array; + + print STDERR "$logPrefix Total Builds records without a jobset_id: $totalToGo, starting at $highestId\n"; + + my $iteration = 0; + my $affected; + do { + my $previousHighId = $highestId; + $iteration++; + $update10kBuilds->execute($highestId, $batchSize); + ($affected, $highestId) = $update10kBuilds->fetchrow_array; + + print STDERR "$logPrefix (batch #$iteration; $totalToGo remaining) Builds.jobset_id: affected $affected rows; max ID: $previousHighId -> $highestId\n"; + $totalToGo -= $affected; + + if ($iteration % $iterationsPerVacuum == 0) { + print STDERR "$logPrefix (batch #$iteration) Vacuuming...\n"; + $vacuum->execute(); + } + } while ($affected > 0); + + if ($skipLocked) { + backfillBuildsJobsetId(0); + } +} + +die "syntax: $0\n" unless @ARGV == 0; + +print STDERR "Beginning with a VACUUM\n"; +$vacuum->execute(); + +backfillJobsJobsetId(1); +backfillBuildsJobsetId(1); + +print STDERR "Ending with a VACUUM\n"; +$vacuum->execute(); From c4cc72f94449564b56ad9f71d530ee5917af3a97 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Thu, 6 Feb 2020 14:26:38 -0500 Subject: [PATCH 14/52] hydra-init: Warn about the schema version migration --- src/script/hydra-init | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/script/hydra-init b/src/script/hydra-init index d813d9fe..1a5f2439 100755 --- a/src/script/hydra-init +++ b/src/script/hydra-init @@ -44,6 +44,17 @@ my @versions = $db->resultset('SchemaVersion')->all; die "couldn't get Hydra schema version!" 
if scalar @versions != 1; my $schemaVersion = $versions[0]->version; +if ($schemaVersion <= 60) { + print STDERR < Date: Wed, 5 Feb 2020 19:42:07 -0500 Subject: [PATCH 15/52] Jobs.jobset_id: make not-null --- src/lib/Hydra/Schema/Jobs.pm | 15 +++++---------- src/sql/hydra.sql | 2 +- src/sql/upgrade-63.sql | 7 +++++++ 3 files changed, 13 insertions(+), 11 deletions(-) create mode 100644 src/sql/upgrade-63.sql diff --git a/src/lib/Hydra/Schema/Jobs.pm b/src/lib/Hydra/Schema/Jobs.pm index d4126e32..b00f3e0f 100644 --- a/src/lib/Hydra/Schema/Jobs.pm +++ b/src/lib/Hydra/Schema/Jobs.pm @@ -51,7 +51,7 @@ __PACKAGE__->table("jobs"); data_type: 'integer' is_foreign_key: 1 - is_nullable: 1 + is_nullable: 0 =head2 name @@ -66,7 +66,7 @@ __PACKAGE__->add_columns( "jobset", { data_type => "text", is_foreign_key => 1, is_nullable => 0 }, "jobset_id", - { data_type => "integer", is_foreign_key => 1, is_nullable => 1 }, + { data_type => "integer", is_foreign_key => 1, is_nullable => 0 }, "name", { data_type => "text", is_nullable => 0 }, ); @@ -139,12 +139,7 @@ __PACKAGE__->belongs_to( "jobset", "Hydra::Schema::Jobsets", { id => "jobset_id" }, - { - is_deferrable => 0, - join_type => "LEFT", - on_delete => "CASCADE", - on_update => "NO ACTION", - }, + { is_deferrable => 0, on_delete => "CASCADE", on_update => "NO ACTION" }, ); =head2 jobset_project_jobset @@ -197,7 +192,7 @@ __PACKAGE__->has_many( ); -# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:30:58 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:dFusVjxb423gIEoadAw9sw +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:33:28 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! 
md5sum:C5Tyh8Ke4yC6q7KIFVOHcQ 1; diff --git a/src/sql/hydra.sql b/src/sql/hydra.sql index 8349e3be..f6c05bf7 100644 --- a/src/sql/hydra.sql +++ b/src/sql/hydra.sql @@ -146,7 +146,7 @@ create table JobsetInputAlts ( create table Jobs ( project text not null, jobset text not null, - jobset_id integer null, + jobset_id integer not null, name text not null, primary key (project, jobset, name), diff --git a/src/sql/upgrade-63.sql b/src/sql/upgrade-63.sql new file mode 100644 index 00000000..a7476e68 --- /dev/null +++ b/src/sql/upgrade-63.sql @@ -0,0 +1,7 @@ +-- Make the Jobs.jobset_id column NOT NULL. If this upgrade fails, +-- either the admin didn't run the backfiller or there is a bug. If +-- the admin ran the backfiller and there are null columns, it is +-- very important to figure out where the nullable columns came from. + +ALTER TABLE Jobs + ALTER COLUMN jobset_id SET NOT NULL; From 8ef08f1385dbfa530de49857610c902006210013 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 5 Feb 2020 15:25:52 -0500 Subject: [PATCH 16/52] Builds.jobset_id: make not-null --- src/lib/Hydra/Schema/Builds.pm | 15 +++++---------- src/sql/hydra.sql | 2 +- src/sql/upgrade-64.sql | 7 +++++++ 3 files changed, 13 insertions(+), 11 deletions(-) create mode 100644 src/sql/upgrade-64.sql diff --git a/src/lib/Hydra/Schema/Builds.pm b/src/lib/Hydra/Schema/Builds.pm index 53454867..937bd5fc 100644 --- a/src/lib/Hydra/Schema/Builds.pm +++ b/src/lib/Hydra/Schema/Builds.pm @@ -68,7 +68,7 @@ __PACKAGE__->table("builds"); data_type: 'integer' is_foreign_key: 1 - is_nullable: 1 + is_nullable: 0 =head2 job @@ -222,7 +222,7 @@ __PACKAGE__->add_columns( "jobset", { data_type => "text", is_foreign_key => 1, is_nullable => 0 }, "jobset_id", - { data_type => "integer", is_foreign_key => 1, is_nullable => 1 }, + { data_type => "integer", is_foreign_key => 1, is_nullable => 0 }, "job", { data_type => "text", is_foreign_key => 1, is_nullable => 0 }, "nixname", @@ -466,12 +466,7 @@ 
__PACKAGE__->belongs_to( "jobset", "Hydra::Schema::Jobsets", { id => "jobset_id" }, - { - is_deferrable => 0, - join_type => "LEFT", - on_delete => "CASCADE", - on_update => "NO ACTION", - }, + { is_deferrable => 0, on_delete => "CASCADE", on_update => "NO ACTION" }, ); =head2 jobset_project_jobset @@ -578,8 +573,8 @@ __PACKAGE__->many_to_many( ); -# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:32:28 -# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:RvrINOAowDcde8Nd9VD6rQ +# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:34:25 +# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:EEXlcKN/ydXJ129vT0jTUw __PACKAGE__->has_many( "dependents", diff --git a/src/sql/hydra.sql b/src/sql/hydra.sql index f6c05bf7..f1d143dc 100644 --- a/src/sql/hydra.sql +++ b/src/sql/hydra.sql @@ -170,7 +170,7 @@ create table Builds ( -- Info about the inputs. project text not null, jobset text not null, - jobset_id integer null, + jobset_id integer not null, job text not null, -- Info about the build result. diff --git a/src/sql/upgrade-64.sql b/src/sql/upgrade-64.sql new file mode 100644 index 00000000..76afa9e7 --- /dev/null +++ b/src/sql/upgrade-64.sql @@ -0,0 +1,7 @@ +-- Make the Builds.jobset_id column NOT NULL. If this upgrade fails, +-- either the admin didn't run the backfiller or there is a bug. If +-- the admin ran the backfiller and there are null columns, it is +-- very important to figure out where the nullable columns came from. 
+ +ALTER TABLE Builds + ALTER COLUMN jobset_id SET NOT NULL; From 3c392b8cd8917e4cb403735904d1c4d71d26b4e1 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 5 Feb 2020 17:30:23 -0500 Subject: [PATCH 17/52] Jobsets.jobs: Fetch via Jobsets.id --- src/lib/Hydra/Schema/Jobsets.pm | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/lib/Hydra/Schema/Jobsets.pm b/src/lib/Hydra/Schema/Jobsets.pm index fbbb253c..249d6aa4 100644 --- a/src/lib/Hydra/Schema/Jobsets.pm +++ b/src/lib/Hydra/Schema/Jobsets.pm @@ -441,10 +441,7 @@ Related object: L __PACKAGE__->has_many( "jobs", "Hydra::Schema::Jobs", - { - "foreign.jobset" => "self.name", - "foreign.project" => "self.project", - }, + { "foreign.jobset_id" => "self.id" }, undef, ); From 7c71f9df28ee8fe552f2aeaed734112542fd0638 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 5 Feb 2020 16:35:39 -0500 Subject: [PATCH 18/52] Jobsets.builds: Fetch via Jobsets.id --- src/lib/Hydra/Schema/Jobsets.pm | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/lib/Hydra/Schema/Jobsets.pm b/src/lib/Hydra/Schema/Jobsets.pm index 249d6aa4..572b5642 100644 --- a/src/lib/Hydra/Schema/Jobsets.pm +++ b/src/lib/Hydra/Schema/Jobsets.pm @@ -423,10 +423,7 @@ Related object: L __PACKAGE__->has_many( "builds", "Hydra::Schema::Builds", - { - "foreign.jobset" => "self.name", - "foreign.project" => "self.project", - }, + { "foreign.jobset_id" => "self.id" }, undef, ); From 66fbbd969251b96ef0a9958c3fa08ea32baa3808 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 5 Feb 2020 18:27:03 -0500 Subject: [PATCH 19/52] Jobs.builds: Fetch via Jobs.jobset_id --- src/lib/Hydra/Schema/Jobs.pm | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/lib/Hydra/Schema/Jobs.pm b/src/lib/Hydra/Schema/Jobs.pm index b00f3e0f..714497f1 100644 --- a/src/lib/Hydra/Schema/Jobs.pm +++ b/src/lib/Hydra/Schema/Jobs.pm @@ -195,4 +195,22 @@ __PACKAGE__->has_many( # Created by 
DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:33:28 # DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:C5Tyh8Ke4yC6q7KIFVOHcQ +=head2 builds + +Type: has_many + +Related object: L + +=cut + +__PACKAGE__->has_many( + "builds", + "Hydra::Schema::Builds", + { + "foreign.job" => "self.name", + "foreign.jobset_id" => "self.jobset_id", + }, + undef, +); + 1; From f0f41eaaff4474c8433f1eaf2924357cf95e015a Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Wed, 5 Feb 2020 17:02:56 -0500 Subject: [PATCH 20/52] LatestSucceededForJob{,set}: Filter with jobset_id --- src/lib/Hydra/Controller/Job.pm | 6 +++--- src/lib/Hydra/Controller/Jobset.pm | 2 +- src/lib/Hydra/Schema/Builds.pm | 4 ++-- src/script/hydra-eval-jobset | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/lib/Hydra/Controller/Job.pm b/src/lib/Hydra/Controller/Job.pm index 1b36e41f..bed362e6 100644 --- a/src/lib/Hydra/Controller/Job.pm +++ b/src/lib/Hydra/Controller/Job.pm @@ -82,7 +82,7 @@ sub overview : Chained('job') PathPart('') Args(0) { # If this is an aggregate job, then get its constituents. my @constituents = $c->model('DB::Builds')->search( { aggregate => { -in => $job->builds->search({}, { columns => ["id"], order_by => "id desc", rows => 15 })->as_query } }, - { join => 'aggregateconstituents_constituents', + { join => 'aggregateconstituents_constituents', columns => ['id', 'job', 'finished', 'buildstatus'], +select => ['aggregateconstituents_constituents.aggregate'], +as => ['aggregate'] @@ -99,7 +99,7 @@ sub overview : Chained('job') PathPart('') Args(0) { foreach my $agg (keys %$aggregates) { # FIXME: could be done in one query. 
- $aggregates->{$agg}->{build} = + $aggregates->{$agg}->{build} = $c->model('DB::Builds')->find({id => $agg}, {columns => [@buildListColumns]}) or die; } @@ -172,7 +172,7 @@ sub get_builds : Chained('job') PathPart('') CaptureArgs(0) { my ($self, $c) = @_; $c->stash->{allBuilds} = $c->stash->{job}->builds; $c->stash->{latestSucceeded} = $c->model('DB')->resultset('LatestSucceededForJob') - ->search({}, {bind => [$c->stash->{project}->name, $c->stash->{jobset}->name, $c->stash->{job}->name]}); + ->search({}, {bind => [$c->stash->{jobset}->name, $c->stash->{job}->name]}); $c->stash->{channelBaseName} = $c->stash->{project}->name . "-" . $c->stash->{jobset}->name . "-" . $c->stash->{job}->name; } diff --git a/src/lib/Hydra/Controller/Jobset.pm b/src/lib/Hydra/Controller/Jobset.pm index 75284c10..fdf5e1c3 100644 --- a/src/lib/Hydra/Controller/Jobset.pm +++ b/src/lib/Hydra/Controller/Jobset.pm @@ -162,7 +162,7 @@ sub get_builds : Chained('jobsetChain') PathPart('') CaptureArgs(0) { my ($self, $c) = @_; $c->stash->{allBuilds} = $c->stash->{jobset}->builds; $c->stash->{latestSucceeded} = $c->model('DB')->resultset('LatestSucceededForJobset') - ->search({}, {bind => [$c->stash->{project}->name, $c->stash->{jobset}->name]}); + ->search({}, {bind => [$c->stash->{jobset}->name]}); $c->stash->{channelBaseName} = $c->stash->{project}->name . "-" . $c->stash->{jobset}->name; } diff --git a/src/lib/Hydra/Schema/Builds.pm b/src/lib/Hydra/Schema/Builds.pm index 937bd5fc..fdbc5ed6 100644 --- a/src/lib/Hydra/Schema/Builds.pm +++ b/src/lib/Hydra/Schema/Builds.pm @@ -637,8 +637,8 @@ QUERY makeQueries('', ""); makeQueries('ForProject', "and project = ?"); -makeQueries('ForJobset', "and project = ? and jobset = ?"); -makeQueries('ForJob', "and project = ? and jobset = ? and job = ?"); +makeQueries('ForJobset', "and jobset_id = (select id from jobsets j where j.name = ?)"); +makeQueries('ForJob', "and jobset_id = (select id from jobsets j where j.name = ?) 
and job = ?"); my %hint = ( diff --git a/src/script/hydra-eval-jobset b/src/script/hydra-eval-jobset index d0cc3e8a..97da1084 100755 --- a/src/script/hydra-eval-jobset +++ b/src/script/hydra-eval-jobset @@ -143,7 +143,7 @@ sub fetchInputSystemBuild { $jobsetName ||= $jobset->name; my @latestBuilds = $db->resultset('LatestSucceededForJob') - ->search({}, {bind => [$projectName, $jobsetName, $jobName]}); + ->search({}, {bind => [$jobsetName, $jobName]}); my @validBuilds = (); foreach my $build (@latestBuilds) { From 2637a7ad76e67b956c835cb8c7c8a75437c9c31e Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Sun, 9 Feb 2020 15:48:11 -0500 Subject: [PATCH 21/52] Builds: index literally what latest-finished queries --- src/sql/hydra.sql | 1 + src/sql/upgrade-65.sql | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 src/sql/upgrade-65.sql diff --git a/src/sql/hydra.sql b/src/sql/hydra.sql index f1d143dc..1f4288f7 100644 --- a/src/sql/hydra.sql +++ b/src/sql/hydra.sql @@ -682,6 +682,7 @@ create index IndexBuildsOnProject on Builds(project); create index IndexBuildsOnTimestamp on Builds(timestamp); create index IndexBuildsOnFinishedStopTime on Builds(finished, stoptime DESC); create index IndexBuildsOnJobFinishedId on builds(project, jobset, job, system, finished, id DESC); +create index IndexFinishedSuccessfulBuilds on Builds(id DESC, buildstatus, finished, job, jobset_id) where buildstatus = 0 and finished = 1; create index IndexBuildsOnDrvPath on Builds(drvPath); create index IndexCachedHgInputsOnHash on CachedHgInputs(uri, branch, sha256hash); create index IndexCachedGitInputsOnHash on CachedGitInputs(uri, branch, sha256hash); diff --git a/src/sql/upgrade-65.sql b/src/sql/upgrade-65.sql new file mode 100644 index 00000000..05986887 --- /dev/null +++ b/src/sql/upgrade-65.sql @@ -0,0 +1,4 @@ +-- Index more exactly what the latest-finished query looks for. 
+create index IndexFinishedSuccessfulBuilds + on Builds(id DESC, buildstatus, finished, job, jobset_id) + where buildstatus = 0 and finished = 1; From 834793468f8f6667181bc0f74d5e00282c439ead Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 11 Feb 2020 08:35:35 -0500 Subject: [PATCH 22/52] fixup: d'oh, make the migrations from #710 part-2 sequential --- src/sql/upgrade-62.sql | 7 +++++++ src/sql/upgrade-63.sql | 4 ++-- src/sql/upgrade-64.sql | 11 ++++------- src/sql/upgrade-65.sql | 4 ---- 4 files changed, 13 insertions(+), 13 deletions(-) create mode 100644 src/sql/upgrade-62.sql delete mode 100644 src/sql/upgrade-65.sql diff --git a/src/sql/upgrade-62.sql b/src/sql/upgrade-62.sql new file mode 100644 index 00000000..a7476e68 --- /dev/null +++ b/src/sql/upgrade-62.sql @@ -0,0 +1,7 @@ +-- Make the Jobs.jobset_id column NOT NULL. If this upgrade fails, +-- either the admin didn't run the backfiller or there is a bug. If +-- the admin ran the backfiller and there are null columns, it is +-- very important to figure out where the nullable columns came from. + +ALTER TABLE Jobs + ALTER COLUMN jobset_id SET NOT NULL; diff --git a/src/sql/upgrade-63.sql b/src/sql/upgrade-63.sql index a7476e68..76afa9e7 100644 --- a/src/sql/upgrade-63.sql +++ b/src/sql/upgrade-63.sql @@ -1,7 +1,7 @@ --- Make the Jobs.jobset_id column NOT NULL. If this upgrade fails, +-- Make the Builds.jobset_id column NOT NULL. If this upgrade fails, -- either the admin didn't run the backfiller or there is a bug. If -- the admin ran the backfiller and there are null columns, it is -- very important to figure out where the nullable columns came from. -ALTER TABLE Jobs +ALTER TABLE Builds ALTER COLUMN jobset_id SET NOT NULL; diff --git a/src/sql/upgrade-64.sql b/src/sql/upgrade-64.sql index 76afa9e7..05986887 100644 --- a/src/sql/upgrade-64.sql +++ b/src/sql/upgrade-64.sql @@ -1,7 +1,4 @@ --- Make the Builds.jobset_id column NOT NULL. 
If this upgrade fails, --- either the admin didn't run the backfiller or there is a bug. If --- the admin ran the backfiller and there are null columns, it is --- very important to figure out where the nullable columns came from. - -ALTER TABLE Builds - ALTER COLUMN jobset_id SET NOT NULL; +-- Index more exactly what the latest-finished query looks for. +create index IndexFinishedSuccessfulBuilds + on Builds(id DESC, buildstatus, finished, job, jobset_id) + where buildstatus = 0 and finished = 1; diff --git a/src/sql/upgrade-65.sql b/src/sql/upgrade-65.sql deleted file mode 100644 index 05986887..00000000 --- a/src/sql/upgrade-65.sql +++ /dev/null @@ -1,4 +0,0 @@ --- Index more exactly what the latest-finished query looks for. -create index IndexFinishedSuccessfulBuilds - on Builds(id DESC, buildstatus, finished, job, jobset_id) - where buildstatus = 0 and finished = 1; From 027668f0dbf37ad07b9e28152fbfac092d4540d0 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 11 Feb 2020 12:52:28 -0500 Subject: [PATCH 23/52] hydra.sql: add an index for slow queries in production These queries used to use (jobset, project) based indexes, and the addition of jobset_id makes most of those indexes unusable now. 
--- src/sql/hydra.sql | 1 + src/sql/upgrade-65.sql | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 src/sql/upgrade-65.sql diff --git a/src/sql/hydra.sql b/src/sql/hydra.sql index 1f4288f7..8144dd30 100644 --- a/src/sql/hydra.sql +++ b/src/sql/hydra.sql @@ -682,6 +682,7 @@ create index IndexBuildsOnProject on Builds(project); create index IndexBuildsOnTimestamp on Builds(timestamp); create index IndexBuildsOnFinishedStopTime on Builds(finished, stoptime DESC); create index IndexBuildsOnJobFinishedId on builds(project, jobset, job, system, finished, id DESC); +create index IndexBuildsOnJobsetIdFinishedId on Builds(id DESC, finished, job, jobset_id); create index IndexFinishedSuccessfulBuilds on Builds(id DESC, buildstatus, finished, job, jobset_id) where buildstatus = 0 and finished = 1; create index IndexBuildsOnDrvPath on Builds(drvPath); create index IndexCachedHgInputsOnHash on CachedHgInputs(uri, branch, sha256hash); diff --git a/src/sql/upgrade-65.sql b/src/sql/upgrade-65.sql new file mode 100644 index 00000000..11d143e5 --- /dev/null +++ b/src/sql/upgrade-65.sql @@ -0,0 +1,2 @@ +-- Add an index like IndexBuildsOnJobFinishedId using jobset_id +create index IndexBuildsOnJobsetIdFinishedId on Builds(id DESC, finished, job, jobset_id); From 15187b059be8ea15423222d7fa81fa938737787e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Sat, 15 Feb 2020 15:12:14 +0100 Subject: [PATCH 24/52] Remove hydra-eval-guile-jobs This hasn't been used in a long time (Guix uses its own CI system), and it probably doesn't work anymore. 
(cherry picked from commit 23c9ca3e94669087d463642baea0cf35a0b8d72f) --- .gitignore | 1 - configure.ac | 13 +- release.nix | 1 - src/lib/Hydra/Controller/Jobset.pm | 5 - src/script/Makefile.am | 6 +- src/script/hydra-eval-guile-jobs.in | 249 ---------------------------- src/script/hydra-eval-jobset | 77 +++------ 7 files changed, 28 insertions(+), 324 deletions(-) delete mode 100644 src/script/hydra-eval-guile-jobs.in diff --git a/.gitignore b/.gitignore index e53ae6a8..5663a9d6 100644 --- a/.gitignore +++ b/.gitignore @@ -15,7 +15,6 @@ Makefile.in /aclocal.m4 /missing /install-sh -/src/script/hydra-eval-guile-jobs /src/sql/hydra-postgresql.sql /src/sql/hydra-sqlite.sql /src/sql/tmp.sqlite diff --git a/configure.ac b/configure.ac index f992e49e..f99d4c9d 100644 --- a/configure.ac +++ b/configure.ac @@ -53,15 +53,6 @@ fi PKG_CHECK_MODULES([NIX], [nix-main nix-expr nix-store]) -PKG_CHECK_MODULES([GUILE], [guile-2.0], [HAVE_GUILE=yes], [HAVE_GUILE=no]) - -if test "x$HAVE_GUILE" = xyes; then - AC_PATH_PROG([GUILE], [guile]) -else - GUILE="guile" -fi -AC_SUBST([GUILE]) - testPath="$(dirname $(type -p expr))" AC_SUBST(testPath) @@ -80,13 +71,11 @@ AC_CONFIG_FILES([ src/lib/Makefile src/root/Makefile src/script/Makefile - src/script/hydra-eval-guile-jobs tests/Makefile tests/jobs/config.nix ]) -AC_CONFIG_COMMANDS([executable-scripts], - [chmod +x src/script/hydra-eval-guile-jobs]) +AC_CONFIG_COMMANDS([executable-scripts], []) AC_CONFIG_HEADER([hydra-config.h]) diff --git a/release.nix b/release.nix index 408e3f69..cf17657e 100644 --- a/release.nix +++ b/release.nix @@ -129,7 +129,6 @@ rec { buildInputs = [ makeWrapper autoconf automake libtool unzip nukeReferences pkgconfig sqlite libpqxx gitAndTools.topGit mercurial darcs subversion bazaar openssl bzip2 libxslt - guile # optional, for Guile + Guix support perlDeps perl nix postgresql95 # for running the tests boost diff --git a/src/lib/Hydra/Controller/Jobset.pm b/src/lib/Hydra/Controller/Jobset.pm index 
fdf5e1c3..91e21dd4 100644 --- a/src/lib/Hydra/Controller/Jobset.pm +++ b/src/lib/Hydra/Controller/Jobset.pm @@ -223,11 +223,6 @@ sub updateJobset { error($c, "Cannot rename jobset to ‘$jobsetName’ since that identifier is already taken.") if $jobsetName ne $oldName && defined $c->stash->{project}->jobsets->find({ name => $jobsetName }); - # When the expression is in a .scm file, assume it's a Guile + Guix - # build expression. - my $exprType = - $c->stash->{params}->{"nixexprpath"} =~ /.scm$/ ? "guile" : "nix"; - my ($nixExprPath, $nixExprInput) = nixExprPathFromParams $c; my $enabled = int($c->stash->{params}->{enabled}); diff --git a/src/script/Makefile.am b/src/script/Makefile.am index 9deb6f29..466d3153 100644 --- a/src/script/Makefile.am +++ b/src/script/Makefile.am @@ -1,6 +1,5 @@ EXTRA_DIST = \ - $(distributable_scripts) \ - hydra-eval-guile-jobs.in + $(distributable_scripts) distributable_scripts = \ hydra-backfill-ids \ @@ -17,5 +16,4 @@ distributable_scripts = \ nix-prefetch-hg bin_SCRIPTS = \ - $(distributable_scripts) \ - hydra-eval-guile-jobs + $(distributable_scripts) diff --git a/src/script/hydra-eval-guile-jobs.in b/src/script/hydra-eval-guile-jobs.in deleted file mode 100644 index 8c5df125..00000000 --- a/src/script/hydra-eval-guile-jobs.in +++ /dev/null @@ -1,249 +0,0 @@ -#!/bin/sh -# Aside from this initial boilerplate, this is actually -*- scheme -*- code. -main="(module-ref (resolve-interface '(hydra-eval-guile-jobs)) 'eval-guile-jobs)" - -# Keep the host's GUILE_LOAD_PATH unchanged to allow the installed Guix to -# be used. This moves Guix modules possibly out of control, but solves -# bootstrapping issues. -# -# Use `--fresh-auto-compile' to ignore any available .go, and force -# recompilation. This is because checkouts in the store has mtime set to -# the epoch, and thus .go files look newer, even though they may not -# correspond. 
- -exec ${GUILE:-@GUILE@} --no-auto-compile --fresh-auto-compile \ - -l "$0" -c "(apply $main (cdr (command-line)))" "$@" -!# -;;; Copyright © 2012, 2013, 2014 Ludovic Courtès -;;; -;;; This file is part of Hydra. -;;; -;;; Hydra is free software: you can redistribute it and/or modify -;;; it under the terms of the GNU General Public License as published by -;;; the Free Software Foundation, either version 3 of the License, or -;;; (at your option) any later version. -;;; -;;; Hydra is distributed in the hope that it will be useful, -;;; but WITHOUT ANY WARRANTY; without even the implied warranty of -;;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -;;; GNU General Public License for more details. -;;; -;;; You should have received a copy of the GNU General Public License -;;; along with Hydra. If not, see . - -(define-module (hydra-eval-guile-jobs) - #:use-module (sxml simple) - #:use-module (ice-9 match) - #:use-module (ice-9 regex) - #:use-module (srfi srfi-1) - #:use-module (srfi srfi-11) - #:export (job-evaluations->xml - eval-guile-jobs)) - -(define (guix-variable module name) - "Dynamically link variable NAME under Guix module MODULE and return it. -Note: this is used instead of `@', because when using `@' in an uncompiled -file, Guile tries to load the module directly as it reads the source, which -fails in our case, leading to the creation of empty (guix ...) modules." - ;; TODO: fail with an XML error description - (let ((m (resolve-interface `(guix ,module)))) - (module-ref m name))) - -(define (%derivation-system drv) - ;; XXX: Awful hack to workaround the fact that `derivation-system', which - ;; is a macro, cannot be referred to dynamically. 
- (struct-ref drv 3)) - -(define strip-store-path - (let* ((store (or (getenv "NIX_STORE_DIR") "/nix/store")) - (store-path-rx - (make-regexp (string-append "^.*" (regexp-quote store) - "/[^-]+-(.+)$")))) - (lambda (path) - (or (and=> (regexp-exec store-path-rx path) - (lambda (match) - (let ((path (match:substring match 1))) - path))) - path)))) - -(define (derivation-path->name drv) - "Return the base name of DRV, sans hash and `.drv' extension." - (let ((d (strip-store-path drv))) - (if (string-suffix? ".drv" d) - (string-drop-right d 4) - d))) - -(define (register-gc-root drv roots-dir) - "Register a permanent garbage collector root under ROOTS-DIR for DRV." - (let ((root (string-append roots-dir "/" (basename drv)))) - (unless (file-exists? root) - (symlink drv root)))) - -(define* (job-evaluations->sxml jobs - #:key gc-roots-dir) - "Return the hydra-eval-jobs SXML form for the result of JOBS, a list of -symbol/thunk pairs." - `(*TOP* - (*PI* xml "version='1.0' encoding='utf-8'") - "\n" - (jobs "\n" - ,@(map (match-lambda - (((? symbol? name) . (? thunk? thunk)) - (let* ((result (save-module-excursion - (lambda () - (set-current-module %user-module) - (with-output-to-port (%make-void-port "w") - thunk)))) - (drv (assoc-ref result 'derivation))) - (define (opt-attr xml-name name) - (match (assoc name result) - ((_ . value) - `((,xml-name ,value))) - (_ - '()))) - - (when gc-roots-dir - ;; Register DRV as a GC root so that it's not collected by - ;; the time 'hydra-queue-runner' attempts to build it. - (register-gc-root drv gc-roots-dir)) - - ;; XXX: Add tags? - `(job (@ (jobName ,name) - (drvPath ,drv) - ,@(opt-attr 'homepage 'home-page) - (license - ,(let loop ((license (assoc-ref result 'license))) - (match license - ((? struct?) - (struct-ref license 0)) - ((l ...) 
- (string-join (map loop l))) - (_ "")))) - ,@(opt-attr 'description 'description) - (maintainers - ,(string-join (or (assoc-ref result 'maintainers) - '()) - ", ")) - (maxSilent - ,(number->string (or (assoc-ref result - 'max-silent-time) - 3600))) - (timeout - ,(number->string (or (assoc-ref result 'timeout) - 72000))) - (nixName ,(derivation-path->name drv)) - (schedulingPriority - ,(number->string (or (assoc-ref result - 'scheduling-priority) - 10))) - (system - ,(call-with-input-file drv - (compose %derivation-system - (guix-variable 'derivations - 'read-derivation))))) - ;; Resolve Guix modules lazily. - ,(map (match-lambda - ((name . path) - `(output (@ (name ,name) (path ,path))))) - ((guix-variable 'derivations - 'derivation-path->output-paths) - drv)) - - "\n")))) - jobs)))) - -(define* (job-evaluations->xml jobs port - #:key gc-roots-dir) - (set-port-encoding! port "UTF-8") - (sxml->xml (job-evaluations->sxml jobs #:gc-roots-dir gc-roots-dir) - port)) - - -;;; -;;; Command-line entry point. -;;; - -(define (parse-arguments args) - "Traverse ARGS, a list of command-line arguments compatible with -`hydra-eval-jobs', and return the name of the file that defines the jobs, an -expression that returns the entry point in that file (a unary procedure), the -list of name/value pairs passed to that entry point, as well as a GC root -directory or #f." - (define (module-directory dir) - (let ((d (string-append dir "/share/guile/site/2.0"))) - (if (file-exists? d) - d - dir))) - - (let loop ((args args) - (result '()) - (file #f) - (entry 'hydra-jobs) - (roots-dir #f)) - (match args - (() - (if (not file) - (error "hydra-eval-guile-jobs: no expression file given") - (values file entry (reverse result) roots-dir))) - (("-I" name=dir rest ...) 
- (let* ((dir (match (string-tokenize name=dir - (char-set-complement (char-set - #\=))) - ((_ dir) dir) - ((dir) dir))) - (dir* (module-directory dir))) - (format (current-error-port) "adding `~a' to the load path~%" dir*) - (set! %load-path (cons dir* %load-path)) - (set! %load-compiled-path (cons dir* %load-compiled-path))) - (loop rest result file entry roots-dir)) - (("--argstr" name value rest ...) - (loop rest (alist-cons (string->symbol name) value result) - file entry roots-dir)) - (("--arg" name expr rest ...) - (let ((value (eval (call-with-input-string expr read) - (current-module)))) - (loop rest (alist-cons (string->symbol name) value result) - file entry roots-dir))) - (("--gc-roots-dir" dir rest ...) - (loop rest result file entry dir)) - (("-j" _ rest ...) ; XXX: what's this? - (loop rest result file entry roots-dir)) - (("--entry" expr rest ...) ; entry point, like `guile -e' - (let ((expr (call-with-input-string expr read))) - (loop rest result file expr roots-dir))) - ((file rest ...) ; source file that defines the jobs - (loop rest result file entry roots-dir)) - (_ - (error "hydra-eval-guile-jobs: invalid arguments" args))))) - -(define %user-module - ;; Hydra user module. - ;; TODO: Make it a sandbox. - (let ((m (make-module))) - (beautify-user-module! m) - m)) - -(define (eval-guile-jobs . args) - (setlocale LC_ALL "") - - (let-values (((file entry args gc-roots-dir) - (parse-arguments args))) - - (save-module-excursion - (lambda () - (set-current-module %user-module) - - ;; The standard output must contain only XML. - (with-output-to-port (%make-void-port "w") - (lambda () - (primitive-load file))))) - - (let* ((entry (eval entry %user-module)) - (store ((guix-variable 'store 'open-connection))) - (jobs (entry store args))) - (unless (string? 
gc-roots-dir) - (format (current-error-port) - "warning: --gc-roots-dir not specified~%")) - - (job-evaluations->xml jobs (current-output-port) - #:gc-roots-dir gc-roots-dir)))) diff --git a/src/script/hydra-eval-jobset b/src/script/hydra-eval-jobset index 97da1084..6ebd59f1 100755 --- a/src/script/hydra-eval-jobset +++ b/src/script/hydra-eval-jobset @@ -264,53 +264,31 @@ sub fetchInput { sub booleanToString { - my ($exprType, $value) = @_; - my $result; - if ($exprType eq "guile") { - if ($value eq "true") { - $result = "#t"; - } else { - $result = "#f"; - } - $result = $value; - } else { - $result = $value; - } - return $result; + my ($value) = @_; + return $value; } sub buildInputToString { - my ($exprType, $input) = @_; - my $result; - if ($exprType eq "guile") { - $result = "'((file-name . \"" . ${input}->{storePath} . "\")" . - (defined $input->{revision} ? "(revision . \"" . $input->{revision} . "\")" : "") . - (defined $input->{revCount} ? "(revision-count . " . $input->{revCount} . ")" : "") . - (defined $input->{gitTag} ? "(git-tag . \"" . $input->{gitTag} . "\")" : "") . - (defined $input->{shortRev} ? "(short-revision . \"" . $input->{shortRev} . "\")" : "") . - (defined $input->{version} ? "(version . \"" . $input->{version} . "\")" : "") . - ")"; - } else { - $result = "{ outPath = builtins.storePath " . $input->{storePath} . "" . - "; inputType = \"" . $input->{type} . "\"" . - (defined $input->{uri} ? "; uri = \"" . $input->{uri} . "\"" : "") . - (defined $input->{revNumber} ? "; rev = " . $input->{revNumber} . "" : "") . - (defined $input->{revision} ? "; rev = \"" . $input->{revision} . "\"" : "") . - (defined $input->{revCount} ? "; revCount = " . $input->{revCount} . "" : "") . - (defined $input->{gitTag} ? "; gitTag = \"" . $input->{gitTag} . "\"" : "") . - (defined $input->{shortRev} ? "; shortRev = \"" . $input->{shortRev} . "\"" : "") . - (defined $input->{version} ? "; version = \"" . $input->{version} . "\"" : "") . 
- (defined $input->{outputName} ? "; outputName = \"" . $input->{outputName} . "\"" : "") . - (defined $input->{drvPath} ? "; drvPath = builtins.storePath " . $input->{drvPath} . "" : "") . - ";}"; - } - return $result; + my ($input) = @_; + return + "{ outPath = builtins.storePath " . $input->{storePath} . "" . + "; inputType = \"" . $input->{type} . "\"" . + (defined $input->{uri} ? "; uri = \"" . $input->{uri} . "\"" : "") . + (defined $input->{revNumber} ? "; rev = " . $input->{revNumber} . "" : "") . + (defined $input->{revision} ? "; rev = \"" . $input->{revision} . "\"" : "") . + (defined $input->{revCount} ? "; revCount = " . $input->{revCount} . "" : "") . + (defined $input->{gitTag} ? "; gitTag = \"" . $input->{gitTag} . "\"" : "") . + (defined $input->{shortRev} ? "; shortRev = \"" . $input->{shortRev} . "\"" : "") . + (defined $input->{version} ? "; version = \"" . $input->{version} . "\"" : "") . + (defined $input->{outputName} ? "; outputName = \"" . $input->{outputName} . "\"" : "") . + (defined $input->{drvPath} ? "; drvPath = builtins.storePath " . $input->{drvPath} . "" : "") . + ";}"; } sub inputsToArgs { - my ($inputInfo, $exprType) = @_; + my ($inputInfo) = @_; my @res = (); foreach my $input (sort keys %{$inputInfo}) { @@ -327,14 +305,12 @@ sub inputsToArgs { push @res, "--argstr", $input, $alt->{value}; } elsif ($alt->{type} eq "boolean") { - push @res, "--arg", $input, booleanToString($exprType, $alt->{value}); + push @res, "--arg", $input, booleanToString($alt->{value}); } elsif ($alt->{type} eq "nix") { - die "input type ‘nix’ only supported for Nix-based jobsets\n" unless $exprType eq "nix"; push @res, "--arg", $input, $alt->{value}; } elsif ($alt->{type} eq "eval") { - die "input type ‘eval’ only supported for Nix-based jobsets\n" unless $exprType eq "nix"; my $s = "{ "; # FIXME: escape $_. But dots should not be escaped. 
$s .= "$_ = builtins.storePath ${\$alt->{jobs}->{$_}}; " @@ -343,7 +319,7 @@ sub inputsToArgs { push @res, "--arg", $input, $s; } else { - push @res, "--arg", $input, buildInputToString($exprType, $alt); + push @res, "--arg", $input, buildInputToString($alt); } } @@ -352,18 +328,16 @@ sub inputsToArgs { sub evalJobs { - my ($inputInfo, $exprType, $nixExprInputName, $nixExprPath) = @_; + my ($inputInfo, $nixExprInputName, $nixExprPath) = @_; my $nixExprInput = $inputInfo->{$nixExprInputName}->[0] or die "cannot find the input containing the job expression\n"; - my $evaluator = ($exprType eq "guile") ? "hydra-eval-guile-jobs" : "hydra-eval-jobs"; - - my @cmd = ($evaluator, + my @cmd = ("hydra-eval-jobs", "<" . $nixExprInputName . "/" . $nixExprPath . ">", "--gc-roots-dir", getGCRootsDir, "-j", 1, - inputsToArgs($inputInfo, $exprType)); + inputsToArgs($inputInfo)); if (defined $ENV{'HYDRA_DEBUG'}) { sub escape { @@ -376,7 +350,7 @@ sub evalJobs { } (my $res, my $jobsJSON, my $stderr) = captureStdoutStderr(21600, @cmd); - die "$evaluator returned " . ($res & 127 ? "signal $res" : "exit code " . ($res >> 8)) + die "hydra-eval-jobs returned " . ($res & 127 ? "signal $res" : "exit code " . ($res >> 8)) . ":\n" . ($stderr ? decode("utf-8", $stderr) : "(no output)\n") if $res; @@ -595,7 +569,6 @@ sub checkJobsetWrapped { $jobset->discard_changes; $inputInfo->{"declInput"} = [ $declInput ]; } - my $exprType = $jobset->nixexprpath =~ /.scm$/ ? "guile" : "nix"; # Fetch all values for all inputs. my $checkoutStart = clock_gettime(CLOCK_MONOTONIC); @@ -621,7 +594,7 @@ sub checkJobsetWrapped { # Hash the arguments to hydra-eval-jobs and check the # JobsetInputHashes to see if the previous evaluation had the same # inputs. If so, bail out. 
- my @args = ($jobset->nixexprinput, $jobset->nixexprpath, inputsToArgs($inputInfo, $exprType)); + my @args = ($jobset->nixexprinput, $jobset->nixexprpath, inputsToArgs($inputInfo)); my $argsHash = sha256_hex("@args"); my $prevEval = getPrevJobsetEval($db, $jobset, 0); if (defined $prevEval && $prevEval->hash eq $argsHash && !$dryRun && !$jobset->forceeval) { @@ -636,7 +609,7 @@ sub checkJobsetWrapped { # Evaluate the job expression. my $evalStart = clock_gettime(CLOCK_MONOTONIC); - my ($jobs, $nixExprInput) = evalJobs($inputInfo, $exprType, $jobset->nixexprinput, $jobset->nixexprpath); + my ($jobs, $nixExprInput) = evalJobs($inputInfo, $jobset->nixexprinput, $jobset->nixexprpath); my $evalStop = clock_gettime(CLOCK_MONOTONIC); if ($jobsetsJobset) { From 2a50daa3774c521a08c0443242ee3f80715a5189 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 17 Feb 2020 16:33:25 +0100 Subject: [PATCH 25/52] Update aggregate handling (cherry picked from commit cf961ac8933b76d9f0a3ac3eb49cc08879f5bcc9) --- src/script/hydra-eval-jobset | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/script/hydra-eval-jobset b/src/script/hydra-eval-jobset index 6ebd59f1..cdb09b74 100755 --- a/src/script/hydra-eval-jobset +++ b/src/script/hydra-eval-jobset @@ -697,7 +697,7 @@ sub checkJobsetWrapped { foreach my $job (values %{$jobs}) { next unless $job->{constituents}; my $x = $drvPathToId{$job->{drvPath}} or die; - foreach my $drvPath (split / /, $job->{constituents}) { + foreach my $drvPath (@{$job->{constituents}}) { my $constituent = $drvPathToId{$drvPath}; if (defined $constituent) { $db->resultset('AggregateConstituents')->update_or_create({aggregate => $x->{id}, constituent => $constituent->{id}}); From b790a007296db1994a514b68d63694f24a39137e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 8 May 2019 21:22:52 +0200 Subject: [PATCH 26/52] Disable deprecation warnings (cherry picked from commit 950e8bef6cd90befcf14e96826053d1d154e39fe) --- 
src/hydra-evaluator/Makefile.am | 2 +- src/hydra-queue-runner/Makefile.am | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/hydra-evaluator/Makefile.am b/src/hydra-evaluator/Makefile.am index 161f8567..73638cfe 100644 --- a/src/hydra-evaluator/Makefile.am +++ b/src/hydra-evaluator/Makefile.am @@ -2,4 +2,4 @@ bin_PROGRAMS = hydra-evaluator hydra_evaluator_SOURCES = hydra-evaluator.cc hydra_evaluator_LDADD = $(NIX_LIBS) -lpqxx -hydra_evaluator_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra +hydra_evaluator_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations diff --git a/src/hydra-queue-runner/Makefile.am b/src/hydra-queue-runner/Makefile.am index b360faed..1726d0df 100644 --- a/src/hydra-queue-runner/Makefile.am +++ b/src/hydra-queue-runner/Makefile.am @@ -4,4 +4,4 @@ hydra_queue_runner_SOURCES = hydra-queue-runner.cc queue-monitor.cc dispatcher.c builder.cc build-result.cc build-remote.cc \ build-result.hh counter.hh token-server.hh state.hh db.hh hydra_queue_runner_LDADD = $(NIX_LIBS) -lpqxx -hydra_queue_runner_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra +hydra_queue_runner_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations From e4f5156c414050274a9ea6db4eab9a2f334a79d5 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Mon, 30 Dec 2019 22:49:26 +0100 Subject: [PATCH 27/52] Build against nix-master (cherry picked from commit e7f2139e251cb73195eea6fb84e2a6167b4db968) --- src/hydra-eval-jobs/Makefile.am | 2 +- src/hydra-eval-jobs/hydra-eval-jobs.cc | 5 +- src/hydra-queue-runner/Makefile.am | 2 +- src/hydra-queue-runner/build-remote.cc | 76 +++++++++-------- src/hydra-queue-runner/build-result.cc | 28 +++---- src/hydra-queue-runner/builder.cc | 44 +++++----- src/hydra-queue-runner/dispatcher.cc | 4 +- src/hydra-queue-runner/hydra-queue-runner.cc | 29 ++++--- src/hydra-queue-runner/queue-monitor.cc | 87 +++++++++++--------- src/hydra-queue-runner/state.hh | 20 ++--- 
src/hydra-queue-runner/token-server.hh | 2 +- src/libhydra/db.hh | 2 +- src/libhydra/hydra-config.hh | 4 +- 13 files changed, 166 insertions(+), 139 deletions(-) diff --git a/src/hydra-eval-jobs/Makefile.am b/src/hydra-eval-jobs/Makefile.am index 7a4e9c91..b41a4eb8 100644 --- a/src/hydra-eval-jobs/Makefile.am +++ b/src/hydra-eval-jobs/Makefile.am @@ -1,5 +1,5 @@ bin_PROGRAMS = hydra-eval-jobs hydra_eval_jobs_SOURCES = hydra-eval-jobs.cc -hydra_eval_jobs_LDADD = $(NIX_LIBS) +hydra_eval_jobs_LDADD = $(NIX_LIBS) -lnixrust hydra_eval_jobs_CXXFLAGS = $(NIX_CFLAGS) -I ../libhydra diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index ce6967b5..8abdea7e 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -147,8 +147,9 @@ static void findJobsWrapped(EvalState & state, JSONObject & top, done. */ auto localStore = state.store.dynamic_pointer_cast(); if (gcRootsDir != "" && localStore) { - Path root = gcRootsDir + "/" + baseNameOf(drvPath); - if (!pathExists(root)) localStore->addPermRoot(drvPath, root, false); + Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath)); + if (!pathExists(root)) + localStore->addPermRoot(localStore->parseStorePath(drvPath), root, false); } auto res2 = res.object("outputs"); diff --git a/src/hydra-queue-runner/Makefile.am b/src/hydra-queue-runner/Makefile.am index 1726d0df..af95a3f9 100644 --- a/src/hydra-queue-runner/Makefile.am +++ b/src/hydra-queue-runner/Makefile.am @@ -3,5 +3,5 @@ bin_PROGRAMS = hydra-queue-runner hydra_queue_runner_SOURCES = hydra-queue-runner.cc queue-monitor.cc dispatcher.cc \ builder.cc build-result.cc build-remote.cc \ build-result.hh counter.hh token-server.hh state.hh db.hh -hydra_queue_runner_LDADD = $(NIX_LIBS) -lpqxx +hydra_queue_runner_LDADD = $(NIX_LIBS) -lpqxx -lnixrust hydra_queue_runner_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations diff --git a/src/hydra-queue-runner/build-remote.cc 
b/src/hydra-queue-runner/build-remote.cc index 81692849..6070bd4e 100644 --- a/src/hydra-queue-runner/build-remote.cc +++ b/src/hydra-queue-runner/build-remote.cc @@ -82,10 +82,10 @@ static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Chil static void copyClosureTo(std::timed_mutex & sendMutex, ref destStore, - FdSource & from, FdSink & to, const PathSet & paths, + FdSource & from, FdSink & to, const StorePathSet & paths, bool useSubstitutes = false) { - PathSet closure; + StorePathSet closure; for (auto & path : paths) destStore->computeFSClosure(path, closure); @@ -94,20 +94,21 @@ static void copyClosureTo(std::timed_mutex & sendMutex, ref destStore, garbage-collect paths that are already there. Optionally, ask the remote host to substitute missing paths. */ // FIXME: substitute output pollutes our build log - to << cmdQueryValidPaths << 1 << useSubstitutes << closure; + to << cmdQueryValidPaths << 1 << useSubstitutes; + writeStorePaths(*destStore, to, closure); to.flush(); /* Get back the set of paths that are already valid on the remote host. 
*/ - auto present = readStorePaths(*destStore, from); + auto present = readStorePaths(*destStore, from); if (present.size() == closure.size()) return; - Paths sorted = destStore->topoSortPaths(closure); + auto sorted = destStore->topoSortPaths(closure); - Paths missing; + StorePathSet missing; for (auto i = sorted.rbegin(); i != sorted.rend(); ++i) - if (present.find(*i) == present.end()) missing.push_back(*i); + if (!present.count(*i)) missing.insert(i->clone()); printMsg(lvlDebug, format("sending %1% missing paths") % missing.size()); @@ -131,7 +132,7 @@ void State::buildRemote(ref destStore, { assert(BuildResult::TimedOut == 8); - string base = baseNameOf(step->drvPath); + string base(step->drvPath.to_string()); result.logFile = logDir + "/" + string(base, 0, 2) + "/" + string(base, 2); AutoDelete autoDelete(result.logFile, false); @@ -217,22 +218,22 @@ void State::buildRemote(ref destStore, outputs of the input derivations. */ updateStep(ssSendingInputs); - PathSet inputs; - BasicDerivation basicDrv(step->drv); + StorePathSet inputs; + BasicDerivation basicDrv(*step->drv); if (sendDerivation) - inputs.insert(step->drvPath); + inputs.insert(step->drvPath.clone()); else - for (auto & p : step->drv.inputSrcs) - inputs.insert(p); + for (auto & p : step->drv->inputSrcs) + inputs.insert(p.clone()); - for (auto & input : step->drv.inputDrvs) { - Derivation drv2 = readDerivation(input.first); + for (auto & input : step->drv->inputDrvs) { + Derivation drv2 = readDerivation(*localStore, localStore->printStorePath(input.first)); for (auto & name : input.second) { auto i = drv2.outputs.find(name); if (i == drv2.outputs.end()) continue; - inputs.insert(i->second.path); - basicDrv.inputSrcs.insert(i->second.path); + inputs.insert(i->second.path.clone()); + basicDrv.inputSrcs.insert(i->second.path.clone()); } } @@ -241,14 +242,15 @@ void State::buildRemote(ref destStore, this will copy the inputs to the binary cache from the local store. 
*/ if (localStore != std::shared_ptr(destStore)) - copyClosure(ref(localStore), destStore, step->drv.inputSrcs, NoRepair, NoCheckSigs); + copyClosure(ref(localStore), destStore, step->drv->inputSrcs, NoRepair, NoCheckSigs); /* Copy the input closure. */ if (!machine->isLocalhost()) { auto mc1 = std::make_shared>(nrStepsWaiting); mc1.reset(); MaintainCount mc2(nrStepsCopyingTo); - printMsg(lvlDebug, format("sending closure of ‘%1%’ to ‘%2%’") % step->drvPath % machine->sshName); + printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’", + localStore->printStorePath(step->drvPath), machine->sshName); auto now1 = std::chrono::steady_clock::now(); @@ -272,14 +274,19 @@ void State::buildRemote(ref destStore, logFD = -1; /* Do the build. */ - printMsg(lvlDebug, format("building ‘%1%’ on ‘%2%’") % step->drvPath % machine->sshName); + printMsg(lvlDebug, "building ‘%s’ on ‘%s’", + localStore->printStorePath(step->drvPath), + machine->sshName); updateStep(ssBuilding); - if (sendDerivation) - to << cmdBuildPaths << PathSet({step->drvPath}); - else - to << cmdBuildDerivation << step->drvPath << basicDrv; + if (sendDerivation) { + to << cmdBuildPaths; + writeStorePaths(*localStore, to, singleton(step->drvPath)); + } else { + to << cmdBuildDerivation << localStore->printStorePath(step->drvPath); + writeDerivation(to, *localStore, basicDrv); + } to << maxSilentTime << buildTimeout; if (GET_PROTOCOL_MINOR(remoteVersion) >= 2) to << maxLogSize; @@ -380,7 +387,8 @@ void State::buildRemote(ref destStore, /* If the path was substituted or already valid, then we didn't get a build log. 
*/ if (result.isCached) { - printMsg(lvlInfo, format("outputs of ‘%1%’ substituted or already valid on ‘%2%’") % step->drvPath % machine->sshName); + printMsg(lvlInfo, "outputs of ‘%s’ substituted or already valid on ‘%s’", + localStore->printStorePath(step->drvPath), machine->sshName); unlink(result.logFile.c_str()); result.logFile = ""; } @@ -395,13 +403,12 @@ void State::buildRemote(ref destStore, auto now1 = std::chrono::steady_clock::now(); - PathSet outputs; - for (auto & output : step->drv.outputs) - outputs.insert(output.second.path); + auto outputs = step->drv->outputPaths(); /* Query the size of the output paths. */ size_t totalNarSize = 0; - to << cmdQueryPathInfos << outputs; + to << cmdQueryPathInfos; + writeStorePaths(*localStore, to, outputs); to.flush(); while (true) { if (readString(from) == "") break; @@ -416,8 +423,8 @@ void State::buildRemote(ref destStore, return; } - printMsg(lvlDebug, format("copying outputs of ‘%s’ from ‘%s’ (%d bytes)") - % step->drvPath % machine->sshName % totalNarSize); + printMsg(lvlDebug, "copying outputs of ‘%s’ from ‘%s’ (%d bytes)", + localStore->printStorePath(step->drvPath), machine->sshName, totalNarSize); /* Block until we have the required amount of memory available, which is twice the NAR size (namely the @@ -431,10 +438,11 @@ void State::buildRemote(ref destStore, auto resMs = std::chrono::duration_cast(resStop - resStart).count(); if (resMs >= 1000) - printMsg(lvlError, format("warning: had to wait %d ms for %d memory tokens for %s") - % resMs % totalNarSize % step->drvPath); + printMsg(lvlError, "warning: had to wait %d ms for %d memory tokens for %s", + resMs, totalNarSize, localStore->printStorePath(step->drvPath)); - to << cmdExportPaths << 0 << outputs; + to << cmdExportPaths << 0; + writeStorePaths(*localStore, to, outputs); to.flush(); destStore->importPaths(from, result.accessor, NoCheckSigs); diff --git a/src/hydra-queue-runner/build-result.cc b/src/hydra-queue-runner/build-result.cc index 
25e8c41a..cd8f0a39 100644 --- a/src/hydra-queue-runner/build-result.cc +++ b/src/hydra-queue-runner/build-result.cc @@ -14,16 +14,14 @@ BuildOutput getBuildOutput(nix::ref store, BuildOutput res; /* Compute the closure size. */ - PathSet outputs; - for (auto & output : drv.outputs) - outputs.insert(output.second.path); - PathSet closure; + auto outputs = drv.outputPaths(); + StorePathSet closure; for (auto & output : outputs) - store->computeFSClosure(output, closure); + store->computeFSClosure(singleton(output), closure); for (auto & path : closure) { auto info = store->queryPathInfo(path); res.closureSize += info->narSize; - if (outputs.find(path) != outputs.end()) res.size += info->narSize; + if (outputs.count(path)) res.size += info->narSize; } /* Get build products. */ @@ -39,11 +37,13 @@ BuildOutput getBuildOutput(nix::ref store, , std::regex::extended); for (auto & output : outputs) { - Path failedFile = output + "/nix-support/failed"; + auto outputS = store->printStorePath(output); + + Path failedFile = outputS + "/nix-support/failed"; if (accessor->stat(failedFile).type == FSAccessor::Type::tRegular) res.failed = true; - Path productsFile = output + "/nix-support/hydra-build-products"; + Path productsFile = outputS + "/nix-support/hydra-build-products"; if (accessor->stat(productsFile).type != FSAccessor::Type::tRegular) continue; @@ -72,7 +72,7 @@ BuildOutput getBuildOutput(nix::ref store, auto st = accessor->stat(product.path); if (st.type == FSAccessor::Type::tMissing) continue; - product.name = product.path == output ? "" : baseNameOf(product.path); + product.name = product.path == store->printStorePath(output) ? 
"" : baseNameOf(product.path); if (st.type == FSAccessor::Type::tRegular) { product.isRegular = true; @@ -91,14 +91,14 @@ BuildOutput getBuildOutput(nix::ref store, if (!explicitProducts) { for (auto & output : drv.outputs) { BuildProduct product; - product.path = output.second.path; + product.path = store->printStorePath(output.second.path); product.type = "nix-build"; product.subtype = output.first == "out" ? "" : output.first; - product.name = storePathToName(product.path); + product.name = output.second.path.name(); auto st = accessor->stat(product.path); if (st.type == FSAccessor::Type::tMissing) - throw Error(format("getting status of ‘%1%’") % product.path); + throw Error("getting status of ‘%s’", product.path); if (st.type == FSAccessor::Type::tDirectory) res.products.push_back(product); } @@ -106,7 +106,7 @@ BuildOutput getBuildOutput(nix::ref store, /* Get the release name from $output/nix-support/hydra-release-name. */ for (auto & output : outputs) { - Path p = output + "/nix-support/hydra-release-name"; + auto p = store->printStorePath(output) + "/nix-support/hydra-release-name"; if (accessor->stat(p).type != FSAccessor::Type::tRegular) continue; try { res.releaseName = trim(accessor->readFile(p)); @@ -116,7 +116,7 @@ BuildOutput getBuildOutput(nix::ref store, /* Get metrics. 
*/ for (auto & output : outputs) { - Path metricsFile = output + "/nix-support/hydra-metrics"; + auto metricsFile = store->printStorePath(output) + "/nix-support/hydra-metrics"; if (accessor->stat(metricsFile).type != FSAccessor::Type::tRegular) continue; for (auto & line : tokenizeString(accessor->readFile(metricsFile), "\n")) { auto fields = tokenizeString>(line); diff --git a/src/hydra-queue-runner/builder.cc b/src/hydra-queue-runner/builder.cc index edd4b1f7..a0fc01c3 100644 --- a/src/hydra-queue-runner/builder.cc +++ b/src/hydra-queue-runner/builder.cc @@ -18,7 +18,7 @@ void setThreadName(const std::string & name) void State::builder(MachineReservation::ptr reservation) { - setThreadName("bld~" + baseNameOf(reservation->step->drvPath)); + setThreadName("bld~" + std::string(reservation->step->drvPath.to_string())); StepResult res = sRetry; @@ -39,8 +39,10 @@ void State::builder(MachineReservation::ptr reservation) auto destStore = getDestStore(); res = doBuildStep(destStore, reservation, activeStep); } catch (std::exception & e) { - printMsg(lvlError, format("uncaught exception building ‘%1%’ on ‘%2%’: %3%") - % reservation->step->drvPath % reservation->machine->sshName % e.what()); + printMsg(lvlError, "uncaught exception building ‘%s’ on ‘%s’: %s", + localStore->printStorePath(reservation->step->drvPath), + reservation->machine->sshName, + e.what()); } } @@ -60,7 +62,7 @@ void State::builder(MachineReservation::ptr reservation) nrRetries++; if (step_->tries > maxNrRetries) maxNrRetries = step_->tries; // yeah yeah, not atomic int delta = retryInterval * std::pow(retryBackoff, step_->tries - 1) + (rand() % 10); - printMsg(lvlInfo, format("will retry ‘%1%’ after %2%s") % step->drvPath % delta); + printMsg(lvlInfo, "will retry ‘%s’ after %ss", localStore->printStorePath(step->drvPath), delta); step_->after = std::chrono::system_clock::now() + std::chrono::seconds(delta); } @@ -95,7 +97,7 @@ State::StepResult State::doBuildStep(nix::ref destStore, cancelled 
(namely if there are no more Builds referring to it). */ BuildID buildId; - Path buildDrvPath; + std::optional buildDrvPath; unsigned int maxSilentTime, buildTimeout; unsigned int repeats = step->isDeterministic ? 1 : 0; @@ -116,7 +118,7 @@ State::StepResult State::doBuildStep(nix::ref destStore, possibility, we retry this step (putting it back in the runnable queue). If there are really no strong pointers to the step, it will be deleted. */ - printMsg(lvlInfo, format("maybe cancelling build step ‘%1%’") % step->drvPath); + printMsg(lvlInfo, "maybe cancelling build step ‘%s’", localStore->printStorePath(step->drvPath)); return sMaybeCancelled; } @@ -138,15 +140,15 @@ State::StepResult State::doBuildStep(nix::ref destStore, if (!build) build = *dependents.begin(); buildId = build->id; - buildDrvPath = build->drvPath; + buildDrvPath = build->drvPath.clone(); maxSilentTime = build->maxSilentTime; buildTimeout = build->buildTimeout; printInfo("performing step ‘%s’ %d times on ‘%s’ (needed by build %d and %d others)", - step->drvPath, repeats + 1, machine->sshName, buildId, (dependents.size() - 1)); + localStore->printStorePath(step->drvPath), repeats + 1, machine->sshName, buildId, (dependents.size() - 1)); } - bool quit = buildId == buildOne && step->drvPath == buildDrvPath; + bool quit = buildId == buildOne && step->drvPath == *buildDrvPath; RemoteResult result; BuildOutput res; @@ -166,7 +168,7 @@ State::StepResult State::doBuildStep(nix::ref destStore, try { auto store = destStore.dynamic_pointer_cast(); if (uploadLogsToBinaryCache && store && pathExists(result.logFile)) { - store->upsertFile("log/" + baseNameOf(step->drvPath), readFile(result.logFile), "text/plain; charset=utf-8"); + store->upsertFile("log/" + std::string(step->drvPath.to_string()), readFile(result.logFile), "text/plain; charset=utf-8"); unlink(result.logFile.c_str()); } } catch (...) 
{ @@ -218,7 +220,7 @@ State::StepResult State::doBuildStep(nix::ref destStore, if (result.stepStatus == bsSuccess) { updateStep(ssPostProcessing); - res = getBuildOutput(destStore, ref(result.accessor), step->drv); + res = getBuildOutput(destStore, ref(result.accessor), *step->drv); } result.accessor = 0; @@ -255,8 +257,8 @@ State::StepResult State::doBuildStep(nix::ref destStore, /* The step had a hopefully temporary failure (e.g. network issue). Retry a number of times. */ if (result.canRetry) { - printMsg(lvlError, format("possibly transient failure building ‘%1%’ on ‘%2%’: %3%") - % step->drvPath % machine->sshName % result.errorMsg); + printMsg(lvlError, "possibly transient failure building ‘%s’ on ‘%s’: %s", + localStore->printStorePath(step->drvPath), machine->sshName, result.errorMsg); assert(stepNr); bool retry; { @@ -275,7 +277,7 @@ State::StepResult State::doBuildStep(nix::ref destStore, assert(stepNr); - for (auto & path : step->drv.outputPaths()) + for (auto & path : step->drv->outputPaths()) addRoot(path); /* Register success in the database for all Build objects that @@ -308,7 +310,8 @@ State::StepResult State::doBuildStep(nix::ref destStore, no new referrers can have been added in the meantime or be added afterwards. */ if (direct.empty()) { - printMsg(lvlDebug, format("finishing build step ‘%1%’") % step->drvPath); + printMsg(lvlDebug, "finishing build step ‘%s’", + localStore->printStorePath(step->drvPath)); steps_->erase(step->drvPath); } } @@ -393,7 +396,8 @@ State::StepResult State::doBuildStep(nix::ref destStore, be certain no new referrers can be added. 
*/ if (indirect.empty()) { for (auto & s : steps) { - printMsg(lvlDebug, format("finishing build step ‘%1%’") % s->drvPath); + printMsg(lvlDebug, "finishing build step ‘%s’", + localStore->printStorePath(s->drvPath)); steps_->erase(s->drvPath); } } @@ -437,8 +441,8 @@ State::StepResult State::doBuildStep(nix::ref destStore, /* Remember failed paths in the database so that they won't be built again. */ if (result.stepStatus != bsCachedFailure && result.canCache) - for (auto & path : step->drv.outputPaths()) - txn.parameterized("insert into FailedPaths values ($1)")(path).exec(); + for (auto & path : step->drv->outputPaths()) + txn.parameterized("insert into FailedPaths values ($1)")(localStore->printStorePath(path)).exec(); txn.commit(); } @@ -478,8 +482,8 @@ State::StepResult State::doBuildStep(nix::ref destStore, } -void State::addRoot(const Path & storePath) +void State::addRoot(const StorePath & storePath) { - auto root = rootsDir + "/" + baseNameOf(storePath); + auto root = rootsDir + "/" + std::string(storePath.to_string()); if (!pathExists(root)) writeFile(root, ""); } diff --git a/src/hydra-queue-runner/dispatcher.cc b/src/hydra-queue-runner/dispatcher.cc index 068d5c57..86596ff5 100644 --- a/src/hydra-queue-runner/dispatcher.cc +++ b/src/hydra-queue-runner/dispatcher.cc @@ -10,7 +10,7 @@ using namespace nix; void State::makeRunnable(Step::ptr step) { - printMsg(lvlChatty, format("step ‘%1%’ is now runnable") % step->drvPath); + printMsg(lvlChatty, "step ‘%s’ is now runnable", localStore->printStorePath(step->drvPath)); { auto step_(step->state.lock()); @@ -248,7 +248,7 @@ system_time State::doDispatch() /* Can this machine do this step? 
*/ if (!mi.machine->supportsStep(step)) { debug("machine '%s' does not support step '%s' (system type '%s')", - mi.machine->sshName, step->drvPath, step->drv.platform); + mi.machine->sshName, localStore->printStorePath(step->drvPath), step->drv->platform); continue; } diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index f97a1c95..3d5dad4d 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -38,9 +38,9 @@ static uint64_t getMemSize() std::string getEnvOrDie(const std::string & key) { - char * value = getenv(key.c_str()); + auto value = getEnv(key); if (!value) throw Error("environment variable '%s' is not set", key); - return value; + return *value; } @@ -160,7 +160,7 @@ void State::monitorMachinesFile() { string defaultMachinesFile = "/etc/nix/machines"; auto machinesFiles = tokenizeString>( - getEnv("NIX_REMOTE_SYSTEMS", pathExists(defaultMachinesFile) ? defaultMachinesFile : ""), ":"); + getEnv("NIX_REMOTE_SYSTEMS").value_or(pathExists(defaultMachinesFile) ? defaultMachinesFile : ""), ":"); if (machinesFiles.empty()) { parseMachines("localhost " + @@ -252,10 +252,10 @@ unsigned int State::createBuildStep(pqxx::work & txn, time_t startTime, BuildID (buildId) (stepNr) (0) // == build - (step->drvPath) + (localStore->printStorePath(step->drvPath)) (status == bsBusy ? 
1 : 0) (startTime, startTime != 0) - (step->drv.platform) + (step->drv->platform) ((int) status, status != bsBusy) (propagatedFrom, propagatedFrom != 0) (errorMsg, errorMsg != "") @@ -264,10 +264,10 @@ unsigned int State::createBuildStep(pqxx::work & txn, time_t startTime, BuildID if (r.affected_rows() == 0) goto restart; - for (auto & output : step->drv.outputs) + for (auto & output : step->drv->outputs) txn.parameterized ("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)") - (buildId)(stepNr)(output.first)(output.second.path).exec(); + (buildId)(stepNr)(output.first)(localStore->printStorePath(output.second.path)).exec(); if (status == bsBusy) txn.exec(fmt("notify step_started, '%d\t%d'", buildId, stepNr)); @@ -310,7 +310,7 @@ void State::finishBuildStep(pqxx::work & txn, const RemoteResult & result, int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime, - Build::ptr build, const Path & drvPath, const string & outputName, const Path & storePath) + Build::ptr build, const StorePath & drvPath, const string & outputName, const StorePath & storePath) { restart: auto stepNr = allocBuildStep(txn, build->id); @@ -320,7 +320,7 @@ int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t sto (build->id) (stepNr) (1) // == substitution - (drvPath) + (localStore->printStorePath(drvPath)) (0) (0) (startTime) @@ -330,7 +330,10 @@ int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t sto txn.parameterized ("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)") - (build->id)(stepNr)(outputName)(storePath).exec(); + (build->id) + (stepNr) + (outputName) + (localStore->printStorePath(storePath)).exec(); return stepNr; } @@ -450,8 +453,8 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build, bool State::checkCachedFailure(Step::ptr step, Connection & conn) { pqxx::work txn(conn); - for (auto & path : step->drv.outputPaths()) - if 
(!txn.parameterized("select 1 from FailedPaths where path = $1")(path).exec().empty()) + for (auto & path : step->drv->outputPaths()) + if (!txn.parameterized("select 1 from FailedPaths where path = $1")(localStore->printStorePath(path)).exec().empty()) return true; return false; } @@ -763,7 +766,7 @@ void State::run(BuildID buildOne) Store::Params localParams; localParams["max-connections"] = "16"; localParams["max-connection-age"] = "600"; - localStore = openStore(getEnv("NIX_REMOTE"), localParams); + localStore = openStore(getEnv("NIX_REMOTE").value_or(""), localParams); auto storeUri = config->getStrOption("store_uri"); _destStore = storeUri == "" ? localStore : openStore(storeUri); diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc index e657a4b8..62de134a 100644 --- a/src/hydra-queue-runner/queue-monitor.cc +++ b/src/hydra-queue-runner/queue-monitor.cc @@ -83,7 +83,7 @@ bool State::getQueuedBuilds(Connection & conn, them yet (since we don't want a long-running transaction). 
*/ std::vector newIDs; std::map newBuildsByID; - std::multimap newBuildsByPath; + std::multimap newBuildsByPath; unsigned int newLastBuildId = lastBuildId; @@ -104,7 +104,7 @@ bool State::getQueuedBuilds(Connection & conn, auto build = std::make_shared(); build->id = id; - build->drvPath = row["drvPath"].as(); + build->drvPath = localStore->parseStorePath(row["drvPath"].as()); build->projectName = row["project"].as(); build->jobsetName = row["jobset"].as(); build->jobName = row["job"].as(); @@ -117,14 +117,14 @@ bool State::getQueuedBuilds(Connection & conn, newIDs.push_back(id); newBuildsByID[id] = build; - newBuildsByPath.emplace(std::make_pair(build->drvPath, id)); + newBuildsByPath.emplace(std::make_pair(build->drvPath.clone(), id)); } } std::set newRunnable; unsigned int nrAdded; std::function createBuild; - std::set finishedDrvs; + std::set finishedDrvs; createBuild = [&](Build::ptr build) { printMsg(lvlTalkative, format("loading build %1% (%2%)") % build->id % build->fullJobName()); @@ -160,7 +160,8 @@ bool State::getQueuedBuilds(Connection & conn, /* Some step previously failed, so mark the build as failed right away. 
*/ - printMsg(lvlError, format("marking build %d as cached failure due to ‘%s’") % build->id % ex.step->drvPath); + printMsg(lvlError, "marking build %d as cached failure due to ‘%s’", + build->id, localStore->printStorePath(ex.step->drvPath)); if (!build->finishedInDB) { auto mc = startDbUpdate(); pqxx::work txn(conn); @@ -171,14 +172,14 @@ bool State::getQueuedBuilds(Connection & conn, auto res = txn.parameterized ("select max(build) from BuildSteps where drvPath = $1 and startTime != 0 and stopTime != 0 and status = 1") - (ex.step->drvPath).exec(); + (localStore->printStorePath(ex.step->drvPath)).exec(); if (!res[0][0].is_null()) propagatedFrom = res[0][0].as(); if (!propagatedFrom) { - for (auto & output : ex.step->drv.outputs) { + for (auto & output : ex.step->drv->outputs) { auto res = txn.parameterized ("select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where path = $1 and startTime != 0 and stopTime != 0 and status = 1") - (output.second.path).exec(); + (localStore->printStorePath(output.second.path)).exec(); if (!res[0][0].is_null()) { propagatedFrom = res[0][0].as(); break; @@ -217,7 +218,7 @@ bool State::getQueuedBuilds(Connection & conn, /* If we didn't get a step, it means the step's outputs are all valid. So we mark this as a finished, cached build. 
*/ if (!step) { - Derivation drv = readDerivation(build->drvPath); + Derivation drv = readDerivation(*localStore, localStore->printStorePath(build->drvPath)); BuildOutput res = getBuildOutputCached(conn, destStore, drv); for (auto & path : drv.outputPaths()) @@ -227,7 +228,7 @@ bool State::getQueuedBuilds(Connection & conn, auto mc = startDbUpdate(); pqxx::work txn(conn); time_t now = time(0); - printMsg(lvlInfo, format("marking build %1% as succeeded (cached)") % build->id); + printMsg(lvlInfo, "marking build %1% as succeeded (cached)", build->id); markSucceededBuild(txn, build, res, true, now, now); notifyBuildFinished(txn, build->id, {}); txn.commit(); @@ -250,8 +251,8 @@ bool State::getQueuedBuilds(Connection & conn, build->propagatePriorities(); - printMsg(lvlChatty, format("added build %1% (top-level step %2%, %3% new steps)") - % build->id % step->drvPath % newSteps.size()); + printMsg(lvlChatty, "added build %1% (top-level step %2%, %3% new steps)", + build->id, localStore->printStorePath(step->drvPath), newSteps.size()); }; /* Now instantiate build steps for each new build. 
The builder @@ -271,7 +272,7 @@ bool State::getQueuedBuilds(Connection & conn, try { createBuild(build); } catch (Error & e) { - e.addPrefix(format("while loading build %1%: ") % build->id); + e.addPrefix(fmt("while loading build %1%: ", build->id)); throw; } @@ -358,10 +359,12 @@ void State::processQueueChange(Connection & conn) activeStepState->cancelled = true; if (activeStepState->pid != -1) { printInfo("killing builder process %d of build step ‘%s’", - activeStepState->pid, activeStep->step->drvPath); + activeStepState->pid, + localStore->printStorePath(activeStep->step->drvPath)); if (kill(activeStepState->pid, SIGINT) == -1) printError("error killing build step ‘%s’: %s", - activeStep->step->drvPath, strerror(errno)); + localStore->printStorePath(activeStep->step->drvPath), + strerror(errno)); } } } @@ -370,8 +373,8 @@ void State::processQueueChange(Connection & conn) Step::ptr State::createStep(ref destStore, - Connection & conn, Build::ptr build, const Path & drvPath, - Build::ptr referringBuild, Step::ptr referringStep, std::set & finishedDrvs, + Connection & conn, Build::ptr build, const StorePath & drvPath, + Build::ptr referringBuild, Step::ptr referringStep, std::set & finishedDrvs, std::set & newSteps, std::set & newRunnable) { if (finishedDrvs.find(drvPath) != finishedDrvs.end()) return 0; @@ -400,7 +403,7 @@ Step::ptr State::createStep(ref destStore, /* If it doesn't exist, create it. */ if (!step) { step = std::make_shared(); - step->drvPath = drvPath; + step->drvPath = drvPath.clone(); isNew = true; } @@ -414,28 +417,28 @@ Step::ptr State::createStep(ref destStore, if (referringStep) step_->rdeps.push_back(referringStep); - (*steps_)[drvPath] = step; + steps_->insert_or_assign(drvPath.clone(), step); } if (!isNew) return step; - printMsg(lvlDebug, format("considering derivation ‘%1%’") % drvPath); + printMsg(lvlDebug, "considering derivation ‘%1%’", localStore->printStorePath(drvPath)); /* Initialize the step. 
Note that the step may be visible in ‘steps’ before this point, but that doesn't matter because it's not runnable yet, and other threads won't make it runnable while step->created == false. */ - step->drv = readDerivation(drvPath); - step->parsedDrv = std::make_unique(drvPath, step->drv); + step->drv = std::make_unique(readDerivation(*localStore, localStore->printStorePath(drvPath))); + step->parsedDrv = std::make_unique(drvPath.clone(), *step->drv); step->preferLocalBuild = step->parsedDrv->willBuildLocally(); - step->isDeterministic = get(step->drv.env, "isDetermistic", "0") == "1"; + step->isDeterministic = get(step->drv->env, "isDetermistic").value_or("0") == "1"; - step->systemType = step->drv.platform; + step->systemType = step->drv->platform; { - auto i = step->drv.env.find("requiredSystemFeatures"); + auto i = step->drv->env.find("requiredSystemFeatures"); StringSet features; - if (i != step->drv.env.end()) + if (i != step->drv->env.end()) features = step->requiredSystemFeatures = tokenizeString>(i->second); if (step->preferLocalBuild) features.insert("local"); @@ -451,12 +454,13 @@ Step::ptr State::createStep(ref destStore, /* Are all outputs valid? 
*/ bool valid = true; - PathSet outputs = step->drv.outputPaths(); + auto outputs = step->drv->outputPaths(); DerivationOutputs missing; - for (auto & i : step->drv.outputs) + for (auto & i : step->drv->outputs) if (!destStore->isValidPath(i.second.path)) { valid = false; - missing[i.first] = i.second; + missing.insert_or_assign(i.first, + DerivationOutput(i.second.path.clone(), std::string(i.second.hashAlgo), std::string(i.second.hash))); } /* Try to copy the missing paths from the local store or from @@ -469,7 +473,7 @@ Step::ptr State::createStep(ref destStore, avail++; else if (useSubstitutes) { SubstitutablePathInfos infos; - localStore->querySubstitutablePathInfos({i.second.path}, infos); + localStore->querySubstitutablePathInfos(singleton(i.second.path), infos); if (infos.size() == 1) avail++; } @@ -482,14 +486,18 @@ Step::ptr State::createStep(ref destStore, time_t startTime = time(0); if (localStore->isValidPath(i.second.path)) - printInfo("copying output ‘%1%’ of ‘%2%’ from local store", i.second.path, drvPath); + printInfo("copying output ‘%1%’ of ‘%2%’ from local store", + localStore->printStorePath(i.second.path), + localStore->printStorePath(drvPath)); else { - printInfo("substituting output ‘%1%’ of ‘%2%’", i.second.path, drvPath); + printInfo("substituting output ‘%1%’ of ‘%2%’", + localStore->printStorePath(i.second.path), + localStore->printStorePath(drvPath)); localStore->ensurePath(i.second.path); // FIXME: should copy directly from substituter to destStore. 
} - copyClosure(ref(localStore), destStore, {i.second.path}); + copyClosure(ref(localStore), destStore, singleton(i.second.path)); time_t stopTime = time(0); @@ -501,7 +509,10 @@ Step::ptr State::createStep(ref destStore, } } catch (Error & e) { - printError("while copying/substituting output ‘%s’ of ‘%s’: %s", i.second.path, drvPath, e.what()); + printError("while copying/substituting output ‘%s’ of ‘%s’: %s", + localStore->printStorePath(i.second.path), + localStore->printStorePath(drvPath), + e.what()); valid = false; break; } @@ -511,15 +522,15 @@ Step::ptr State::createStep(ref destStore, // FIXME: check whether all outputs are in the binary cache. if (valid) { - finishedDrvs.insert(drvPath); + finishedDrvs.insert(drvPath.clone()); return 0; } /* No, we need to build. */ - printMsg(lvlDebug, format("creating build step ‘%1%’") % drvPath); + printMsg(lvlDebug, "creating build step ‘%1%’", localStore->printStorePath(drvPath)); /* Create steps for the dependencies. */ - for (auto & i : step->drv.inputDrvs) { + for (auto & i : step->drv->inputDrvs) { auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable); if (dep) { auto step_(step->state.lock()); @@ -607,7 +618,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref ("select id, buildStatus, releaseName, closureSize, size from Builds b " "join BuildOutputs o on b.id = o.build " "where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1") - (output.second.path).exec(); + (localStore->printStorePath(output.second.path)).exec(); if (r.empty()) continue; BuildID id = r[0][0].as(); diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index a95cdb61..de74b768 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -123,8 +123,8 @@ struct Build typedef std::weak_ptr wptr; BuildID id; - nix::Path drvPath; - std::map outputs; + nix::StorePath drvPath; + std::map outputs; std::string 
projectName, jobsetName, jobName; time_t timestamp; unsigned int maxSilentTime, buildTimeout; @@ -150,8 +150,8 @@ struct Step typedef std::shared_ptr ptr; typedef std::weak_ptr wptr; - nix::Path drvPath; - nix::Derivation drv; + nix::StorePath drvPath; + std::unique_ptr drv; std::unique_ptr parsedDrv; std::set requiredSystemFeatures; bool preferLocalBuild; @@ -252,7 +252,7 @@ struct Machine { /* Check that this machine is of the type required by the step. */ - if (!systemTypes.count(step->drv.platform == "builtin" ? nix::settings.thisSystem : step->drv.platform)) + if (!systemTypes.count(step->drv->platform == "builtin" ? nix::settings.thisSystem : step->drv->platform)) return false; /* Check that the step requires all mandatory features of this @@ -313,7 +313,7 @@ private: queued builds). Note that these are weak pointers. Steps are kept alive by being reachable from Builds or by being in progress. */ - typedef std::map Steps; + typedef std::map Steps; nix::Sync steps; /* Build steps that have no unbuilt dependencies. 
*/ @@ -454,7 +454,7 @@ private: const std::string & machine); int createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime, - Build::ptr build, const nix::Path & drvPath, const std::string & outputName, const nix::Path & storePath); + Build::ptr build, const nix::StorePath & drvPath, const std::string & outputName, const nix::StorePath & storePath); void updateBuild(pqxx::work & txn, Build::ptr build, BuildStatus status); @@ -473,8 +473,8 @@ private: const nix::Derivation & drv); Step::ptr createStep(nix::ref store, - Connection & conn, Build::ptr build, const nix::Path & drvPath, - Build::ptr referringBuild, Step::ptr referringStep, std::set & finishedDrvs, + Connection & conn, Build::ptr build, const nix::StorePath & drvPath, + Build::ptr referringBuild, Step::ptr referringStep, std::set & finishedDrvs, std::set & newSteps, std::set & newRunnable); Jobset::ptr createJobset(pqxx::work & txn, @@ -523,7 +523,7 @@ private: void dumpStatus(Connection & conn, bool log); - void addRoot(const nix::Path & storePath); + void addRoot(const nix::StorePath & storePath); public: diff --git a/src/hydra-queue-runner/token-server.hh b/src/hydra-queue-runner/token-server.hh index e00004d0..d8004f73 100644 --- a/src/hydra-queue-runner/token-server.hh +++ b/src/hydra-queue-runner/token-server.hh @@ -7,7 +7,7 @@ namespace nix { -MakeError(NoTokens, Error) +MakeError(NoTokens, Error); /* This class hands out tokens. There are only ‘maxTokens’ tokens available. 
Calling get(N) will return a Token object, representing diff --git a/src/libhydra/db.hh b/src/libhydra/db.hh index 29af7cc2..35d78edf 100644 --- a/src/libhydra/db.hh +++ b/src/libhydra/db.hh @@ -12,7 +12,7 @@ struct Connection : pqxx::connection std::string getFlags() { using namespace nix; - auto s = getEnv("HYDRA_DBI", "dbi:Pg:dbname=hydra;"); + auto s = getEnv("HYDRA_DBI").value_or("dbi:Pg:dbname=hydra;"); std::string prefix = "dbi:Pg:"; if (std::string(s, 0, prefix.size()) != prefix) throw Error("$HYDRA_DBI does not denote a PostgreSQL database"); diff --git a/src/libhydra/hydra-config.hh b/src/libhydra/hydra-config.hh index a4050666..98d73d47 100644 --- a/src/libhydra/hydra-config.hh +++ b/src/libhydra/hydra-config.hh @@ -14,9 +14,9 @@ struct Config /* Read hydra.conf. */ auto hydraConfigFile = getEnv("HYDRA_CONFIG"); - if (pathExists(hydraConfigFile)) { + if (hydraConfigFile && pathExists(*hydraConfigFile)) { - for (auto line : tokenizeString(readFile(hydraConfigFile), "\n")) { + for (auto line : tokenizeString(readFile(*hydraConfigFile), "\n")) { line = trim(string(line, 0, line.find('#'))); auto eq = line.find('='); From 5308e514ad64cde0dafac104c0774a7a77f9517e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 20 Feb 2020 10:26:18 +0100 Subject: [PATCH 28/52] Fix nlohmann_json dependency --- release.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release.nix b/release.nix index cf17657e..ce07010a 100644 --- a/release.nix +++ b/release.nix @@ -132,7 +132,7 @@ rec { perlDeps perl nix postgresql95 # for running the tests boost - nlohmann_json + (nlohmann_json.override { multipleHeaders = true; }) ]; hydraPath = lib.makeBinPath ( From adf61e5cf8571a4c50664361c2d8e3b89266e11e Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Sat, 15 Feb 2020 14:54:21 +0100 Subject: [PATCH 29/52] Fix build (cherry picked from commit 639c660abfd5de62ecfcd8d3cbc2eb6924c7ec75) --- src/hydra-queue-runner/queue-monitor.cc | 7 +++---- 
src/hydra-queue-runner/state.hh | 6 ++++++ 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/src/hydra-queue-runner/queue-monitor.cc b/src/hydra-queue-runner/queue-monitor.cc index 62de134a..3c02e1aa 100644 --- a/src/hydra-queue-runner/queue-monitor.cc +++ b/src/hydra-queue-runner/queue-monitor.cc @@ -102,9 +102,9 @@ bool State::getQueuedBuilds(Connection & conn, if (id > newLastBuildId) newLastBuildId = id; if (builds_->count(id)) continue; - auto build = std::make_shared(); + auto build = std::make_shared( + localStore->parseStorePath(row["drvPath"].as())); build->id = id; - build->drvPath = localStore->parseStorePath(row["drvPath"].as()); build->projectName = row["project"].as(); build->jobsetName = row["jobset"].as(); build->jobName = row["job"].as(); @@ -402,8 +402,7 @@ Step::ptr State::createStep(ref destStore, /* If it doesn't exist, create it. */ if (!step) { - step = std::make_shared(); - step->drvPath = drvPath.clone(); + step = std::make_shared(drvPath.clone()); isNew = true; } diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index de74b768..180907e9 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -136,6 +136,9 @@ struct Build std::atomic_bool finishedInDB{false}; + Build(nix::StorePath && drvPath) : drvPath(std::move(drvPath)) + { } + std::string fullJobName() { return projectName + ":" + jobsetName + ":" + jobName; @@ -201,6 +204,9 @@ struct Step nix::Sync state; + Step(nix::StorePath && drvPath) : drvPath(std::move(drvPath)) + { } + ~Step() { //printMsg(lvlError, format("destroying step %1%") % drvPath); From c642f787ee5f4b1fcb14309b19c5841e2a2e5e75 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 19 Feb 2020 21:10:22 +0100 Subject: [PATCH 30/52] hydra-eval-jobs: Parallelize (cherry picked from commit be8eb9d00d6a3fcac520bc7dfd5740c0b3ade746) --- src/hydra-eval-jobs/hydra-eval-jobs.cc | 542 ++++++++++++++----------- 1 file changed, 315 insertions(+), 227 
deletions(-) diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index 8abdea7e..1514cf4b 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -1,35 +1,63 @@ #include #include -#define GC_LINUX_THREADS 1 -#include - #include "shared.hh" #include "store-api.hh" #include "eval.hh" #include "eval-inline.hh" #include "util.hh" -#include "json.hh" #include "get-drvs.hh" #include "globals.hh" #include "common-eval-args.hh" +#include "attr-path.hh" +#include "derivations.hh" #include "hydra-config.hh" #include #include +#include + +#include using namespace nix; - static Path gcRootsDir; +static size_t maxMemorySize; +struct MyArgs : MixEvalArgs, MixCommonArgs +{ + Path releaseExpr; + bool dryRun = false; -static void findJobs(EvalState & state, JSONObject & top, - Bindings & autoArgs, Value & v, const string & attrPath); + MyArgs() : MixCommonArgs("hydra-eval-jobs") + { + mkFlag() + .longName("help") + .description("show usage information") + .handler([&]() { + printHelp(programName, std::cout); + throw Exit(); + }); + mkFlag() + .longName("gc-roots-dir") + .description("garbage collector roots directory") + .labels({"path"}) + .dest(&gcRootsDir); -static string queryMetaStrings(EvalState & state, DrvInfo & drv, const string & name, const string & subAttribute) + mkFlag() + .longName("dry-run") + .description("don't create store derivations") + .set(&dryRun, true); + + expectArg("expr", &releaseExpr); + } +}; + +static MyArgs myArgs; + +static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const string & name, const string & subAttribute) { Strings res; std::function rec; @@ -54,170 +82,135 @@ static string queryMetaStrings(EvalState & state, DrvInfo & drv, const string & return concatStringsSep(", ", res); } - -static std::string lastAttrPath; -static bool comma = false; -static size_t maxHeapSize; - - -struct BailOut { }; - - -bool lte(const std::string & s1, const 
std::string & s2) +static void worker( + EvalState & state, + Bindings & autoArgs, + AutoCloseFD & to, + AutoCloseFD & from) { - size_t p1 = 0, p2 = 0; + Value vTop; + state.evalFile(lookupFileArg(state, myArgs.releaseExpr), vTop); + + auto vRoot = state.allocValue(); + state.autoCallFunction(autoArgs, vTop, *vRoot); while (true) { - if (p1 == s1.size()) return p2 == s2.size(); - if (p2 == s2.size()) return true; + /* Wait for the master to send us a job name. */ + writeLine(to.get(), "next"); - auto d1 = s1.find('.', p1); - auto d2 = s2.find('.', p2); + auto s = readLine(from.get()); + if (s == "exit") break; + if (!hasPrefix(s, "do ")) abort(); + std::string attrPath(s, 3); - auto c = s1.compare(p1, d1 - p1, s2, p2, d2 - p2); + debug("worker process %d at '%s'", getpid(), attrPath); - if (c < 0) return true; - if (c > 0) return false; + /* Evaluate it and send info back to the master. */ + nlohmann::json reply; - p1 = d1 == std::string::npos ? s1.size() : d1 + 1; - p2 = d2 == std::string::npos ? 
s2.size() : d2 + 1; - } -} + try { + auto v = findAlongAttrPath(state, attrPath, autoArgs, *vRoot).first; + + state.forceValue(*v); + + if (auto drv = getDerivation(state, *v, false)) { + + DrvInfo::Outputs outputs = drv->queryOutputs(); + + if (drv->querySystem() == "unknown") + throw EvalError("derivation must have a 'system' attribute"); + + auto drvPath = drv->queryDrvPath(); + + nlohmann::json job; + + job["nixName"] = drv->queryName(); + job["system"] =drv->querySystem(); + job["drvPath"] = drvPath; + job["description"] = drv->queryMetaString("description"); + job["license"] = queryMetaStrings(state, *drv, "license", "shortName"); + job["homepage"] = drv->queryMetaString("homepage"); + job["maintainers"] = queryMetaStrings(state, *drv, "maintainers", "email"); + job["schedulingPriority"] = drv->queryMetaInt("schedulingPriority", 100); + job["timeout"] = drv->queryMetaInt("timeout", 36000); + job["maxSilent"] = drv->queryMetaInt("maxSilent", 7200); + job["isChannel"] = drv->queryMetaBool("isHydraChannel", false); + + /* If this is an aggregate, then get its constituents. 
*/ + auto a = v->attrs->get(state.symbols.create("_hydraAggregate")); + if (a && state.forceBool(*a->value, *a->pos)) { + auto a = v->attrs->get(state.symbols.create("constituents")); + if (!a) + throw EvalError("derivation must have a ‘constituents’ attribute"); -static void findJobsWrapped(EvalState & state, JSONObject & top, - Bindings & autoArgs, Value & vIn, const string & attrPath) -{ - if (lastAttrPath != "" && lte(attrPath, lastAttrPath)) return; + PathSet context; + state.coerceToString(*a->pos, *a->value, context, true, false); + for (auto & i : context) + if (i.at(0) == '!') { + size_t index = i.find("!", 1); + job["constituents"].push_back(string(i, index + 1)); + } - debug(format("at path `%1%'") % attrPath); - - checkInterrupt(); - - Value v; - state.autoCallFunction(autoArgs, vIn, v); - - if (v.type == tAttrs) { - - auto drv = getDerivation(state, v, false); - - if (drv) { - Path drvPath; - - DrvInfo::Outputs outputs = drv->queryOutputs(); - - if (drv->querySystem() == "unknown") - throw EvalError("derivation must have a ‘system’ attribute"); - - if (comma) { std::cout << ","; comma = false; } - - { - auto res = top.object(attrPath); - res.attr("nixName", drv->queryName()); - res.attr("system", drv->querySystem()); - res.attr("drvPath", drvPath = drv->queryDrvPath()); - res.attr("description", drv->queryMetaString("description")); - res.attr("license", queryMetaStrings(state, *drv, "license", "shortName")); - res.attr("homepage", drv->queryMetaString("homepage")); - res.attr("maintainers", queryMetaStrings(state, *drv, "maintainers", "email")); - res.attr("schedulingPriority", drv->queryMetaInt("schedulingPriority", 100)); - res.attr("timeout", drv->queryMetaInt("timeout", 36000)); - res.attr("maxSilent", drv->queryMetaInt("maxSilent", 7200)); - res.attr("isChannel", drv->queryMetaBool("isHydraChannel", false)); - - /* If this is an aggregate, then get its constituents. 
*/ - Bindings::iterator a = v.attrs->find(state.symbols.create("_hydraAggregate")); - if (a != v.attrs->end() && state.forceBool(*a->value, *a->pos)) { - Bindings::iterator a = v.attrs->find(state.symbols.create("constituents")); - if (a == v.attrs->end()) - throw EvalError("derivation must have a ‘constituents’ attribute"); - PathSet context; - state.coerceToString(*a->pos, *a->value, context, true, false); - PathSet drvs; - for (auto & i : context) - if (i.at(0) == '!') { - size_t index = i.find("!", 1); - drvs.insert(string(i, index + 1)); + state.forceList(*a->value, *a->pos); + for (unsigned int n = 0; n < a->value->listSize(); ++n) { + auto v = a->value->listElems()[n]; + state.forceValue(*v); + if (v->type == tString) + job["namedConstituents"].push_back(state.forceStringNoCtx(*v)); } - res.attr("constituents", concatStringsSep(" ", drvs)); + } + + /* Register the derivation as a GC root. !!! This + registers roots for jobs that we may have already + done. */ + auto localStore = state.store.dynamic_pointer_cast(); + if (gcRootsDir != "" && localStore) { + Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath)); + if (!pathExists(root)) + localStore->addPermRoot(localStore->parseStorePath(drvPath), root, false); + } + + nlohmann::json out; + for (auto & j : outputs) + out[j.first] = j.second; + job["outputs"] = std::move(out); + + reply["job"] = std::move(job); } - /* Register the derivation as a GC root. !!! This - registers roots for jobs that we may have already - done. 
*/ - auto localStore = state.store.dynamic_pointer_cast(); - if (gcRootsDir != "" && localStore) { - Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath)); - if (!pathExists(root)) - localStore->addPermRoot(localStore->parseStorePath(drvPath), root, false); - } - - auto res2 = res.object("outputs"); - for (auto & j : outputs) - res2.attr(j.first, j.second); - - } - - GC_prof_stats_s gc; - GC_get_prof_stats(&gc, sizeof(gc)); - - if (gc.heapsize_full > maxHeapSize) { - printInfo("restarting hydra-eval-jobs after job '%s' because heap size is at %d bytes", attrPath, gc.heapsize_full); - lastAttrPath = attrPath; - throw BailOut(); - } - } - - else { - if (!state.isDerivation(v)) { - for (auto & i : v.attrs->lexicographicOrder()) { + else if (v->type == tAttrs) { + auto attrs = nlohmann::json::array(); + StringSet ss; + for (auto & i : v->attrs->lexicographicOrder()) { std::string name(i->name); - - /* Skip jobs with dots in the name. */ - if (name.find('.') != std::string::npos) { + if (name.find('.') != std::string::npos || name.find(' ') != std::string::npos) { printError("skipping job with illegal name '%s'", name); continue; } - - findJobs(state, top, autoArgs, *i->value, - (attrPath.empty() ? "" : attrPath + ".") + name); + attrs.push_back(name); } + reply["attrs"] = std::move(attrs); } + + } catch (EvalError & e) { + reply["error"] = filterANSIEscapes(e.msg(), true); } + + writeLine(to.get(), reply.dump()); + + /* If our RSS exceeds the maximum, exit. The master will + start a new process. 
*/ + struct rusage r; + getrusage(RUSAGE_SELF, &r); + if ((size_t) r.ru_maxrss > maxMemorySize * 1024) break; } - else if (v.type == tNull) { - // allow null values, meaning 'do nothing' - } - - else - throw TypeError(format("unsupported value: %1%") % v); + writeLine(to.get(), "restart"); } - -static void findJobs(EvalState & state, JSONObject & top, - Bindings & autoArgs, Value & v, const string & attrPath) -{ - try { - findJobsWrapped(state, top, autoArgs, v, attrPath); - } catch (EvalError & e) { - if (comma) { std::cout << ","; comma = false; } - auto res = top.object(attrPath); - res.attr("error", filterANSIEscapes(e.msg(), true)); - } -} - - int main(int argc, char * * argv) { - assert(lte("abc", "def")); - assert(lte("abc", "def.foo")); - assert(!lte("def", "abc")); - assert(lte("nixpkgs.hello", "nixpkgs")); - assert(lte("nixpkgs.hello", "nixpkgs.hellooo")); - assert(lte("gitAndTools.git-annex.x86_64-darwin", "gitAndTools.git-annex.x86_64-linux")); - assert(lte("gitAndTools.git-annex.x86_64-linux", "gitAndTools.git-annex-remote-b2.aarch64-linux")); - /* Prevent undeclared dependencies in the evaluation via $NIX_PATH. */ unsetenv("NIX_PATH"); @@ -226,116 +219,211 @@ int main(int argc, char * * argv) auto config = std::make_unique<::Config>(); - auto initialHeapSize = config->getStrOption("evaluator_initial_heap_size", ""); - if (initialHeapSize != "") - setenv("GC_INITIAL_HEAP_SIZE", initialHeapSize.c_str(), 1); - - maxHeapSize = config->getIntOption("evaluator_max_heap_size", 1UL << 30); + auto nrWorkers = config->getIntOption("evaluator_workers", 1); + maxMemorySize = config->getIntOption("evaluator_max_memory_size", 4096); initNix(); initGC(); - /* Read the current heap size, which is the initial heap size. */ - GC_prof_stats_s gc; - GC_get_prof_stats(&gc, sizeof(gc)); - auto initialHeapSizeInt = gc.heapsize_full; - - /* Then make sure the maximum heap size will be bigger than the initial heap size. 
*/ - if (initialHeapSizeInt > maxHeapSize) { - printInfo("warning: evaluator_initial_heap_size (%d) bigger than evaluator_max_heap_size (%d).", initialHeapSizeInt, maxHeapSize); - maxHeapSize = initialHeapSizeInt * 1.1; - printInfo(" evaluator_max_heap_size now set to %d.", maxHeapSize); - } - - Path releaseExpr; - - struct MyArgs : LegacyArgs, MixEvalArgs - { - using LegacyArgs::LegacyArgs; - }; - - MyArgs myArgs(baseNameOf(argv[0]), [&](Strings::iterator & arg, const Strings::iterator & end) { - if (*arg == "--gc-roots-dir") - gcRootsDir = getArg(*arg, arg, end); - else if (*arg == "--dry-run") - settings.readOnlyMode = true; - else if (*arg != "" && arg->at(0) == '-') - return false; - else - releaseExpr = *arg; - return true; - }); - myArgs.parseCmdline(argvToStrings(argc, argv)); - JSONObject json(std::cout, true); - std::cout.flush(); + /* FIXME: The build hook in conjunction with import-from-derivation is causing "unexpected EOF" during eval */ + settings.builders = ""; - do { + /* Prevent access to paths outside of the Nix search path and + to the environment. */ + evalSettings.restrictEval = true; - Pipe pipe; - pipe.create(); + if (myArgs.dryRun) settings.readOnlyMode = true; - ProcessOptions options; - options.allowVfork = false; + if (myArgs.releaseExpr == "") throw UsageError("no expression specified"); - GC_atfork_prepare(); + if (gcRootsDir == "") printMsg(lvlError, "warning: `--gc-roots-dir' not specified"); - auto pid = startProcess([&]() { - pipe.readSide = -1; + struct State + { + std::set todo{""}; + std::set active; + nlohmann::json jobs; + std::exception_ptr exc; + }; - GC_atfork_child(); - GC_start_mark_threads(); + std::condition_variable wakeup; - if (lastAttrPath != "") debug("resuming from '%s'", lastAttrPath); + Sync state_; - /* FIXME: The build hook in conjunction with import-from-derivation is causing "unexpected EOF" during eval */ - settings.builders = ""; + /* Start a handler thread per worker process. 
*/ + auto handler = [&]() + { + try { + pid_t pid = -1; + AutoCloseFD from, to; - /* Prevent access to paths outside of the Nix search path and - to the environment. */ - evalSettings.restrictEval = true; + while (true) { - if (releaseExpr == "") throw UsageError("no expression specified"); + /* Start a new worker process if necessary. */ + if (pid == -1) { + Pipe toPipe, fromPipe; + toPipe.create(); + fromPipe.create(); + pid = startProcess( + [&, + to{std::make_shared(std::move(fromPipe.writeSide))}, + from{std::make_shared(std::move(toPipe.readSide))} + ]() + { + try { + EvalState state(myArgs.searchPath, openStore()); + Bindings & autoArgs = *myArgs.getAutoArgs(state); + worker(state, autoArgs, *to, *from); + } catch (std::exception & e) { + nlohmann::json err; + err["error"] = e.what(); + writeLine(to->get(), err.dump()); + } + }, + ProcessOptions { .allowVfork = false }); + from = std::move(fromPipe.readSide); + to = std::move(toPipe.writeSide); + debug("created worker process %d", pid); + } - if (gcRootsDir == "") printMsg(lvlError, "warning: `--gc-roots-dir' not specified"); + /* Check whether the existing worker process is still there. */ + auto s = readLine(from.get()); + if (s == "restart") { + pid = -1; + continue; + } else if (s != "next") { + auto json = nlohmann::json::parse(s); + throw Error("worker error: %s", (std::string) json["error"]); + } - EvalState state(myArgs.searchPath, openStore()); + /* Wait for a job name to become available. 
*/ + std::string attrPath; - Bindings & autoArgs = *myArgs.getAutoArgs(state); + while (true) { + checkInterrupt(); + auto state(state_.lock()); + if ((state->todo.empty() && state->active.empty()) || state->exc) { + writeLine(to.get(), "exit"); + return; + } + if (!state->todo.empty()) { + attrPath = *state->todo.begin(); + state->todo.erase(state->todo.begin()); + state->active.insert(attrPath); + break; + } else + state.wait(wakeup); + } - Value v; - state.evalFile(lookupFileArg(state, releaseExpr), v); + /* Tell the worker to evaluate it. */ + writeLine(to.get(), "do " + attrPath); - comma = lastAttrPath != ""; + /* Wait for the response. */ + auto response = nlohmann::json::parse(readLine(from.get())); - try { - findJobs(state, json, autoArgs, v, ""); - lastAttrPath = ""; - } catch (BailOut &) { } + /* Handle the response. */ + StringSet newAttrs; - writeFull(pipe.writeSide.get(), lastAttrPath); + if (response.find("job") != response.end()) { + auto state(state_.lock()); + state->jobs[attrPath] = response["job"]; + } - exit(0); - }, options); + if (response.find("attrs") != response.end()) { + for (auto & i : response["attrs"]) { + auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) i; + newAttrs.insert(s); + } + } - GC_atfork_parent(); + if (response.find("error") != response.end()) { + auto state(state_.lock()); + state->jobs[attrPath]["error"] = response["error"]; + } - pipe.writeSide = -1; + /* Add newly discovered job names to the queue. */ + { + auto state(state_.lock()); + state->active.erase(attrPath); + for (auto & s : newAttrs) + state->todo.insert(s); + wakeup.notify_all(); + } + } + } catch (...) 
{ + auto state(state_.lock()); + state->exc = std::current_exception(); + wakeup.notify_all(); + } + }; - int status; - while (true) { - checkInterrupt(); - if (waitpid(pid, &status, 0) == pid) break; - if (errno != EINTR) continue; + std::vector threads; + for (size_t i = 0; i < nrWorkers; i++) + threads.emplace_back(std::thread(handler)); + + for (auto & thread : threads) + thread.join(); + + auto state(state_.lock()); + + if (state->exc) + std::rethrow_exception(state->exc); + + /* For aggregate jobs that have named consistuents + (i.e. constituents that are a job name rather than a + derivation), look up the referenced job and add it to the + dependencies of the aggregate derivation. */ + auto store = openStore(); + + for (auto i = state->jobs.begin(); i != state->jobs.end(); ++i) { + auto jobName = i.key(); + auto & job = i.value(); + + auto named = job.find("namedConstituents"); + if (named == job.end()) continue; + + if (myArgs.dryRun) { + for (std::string jobName2 : *named) { + auto job2 = state->jobs.find(jobName2); + if (job2 == state->jobs.end()) + throw Error("aggregate job '%s' references non-existent job '%s'", jobName, jobName2); + std::string drvPath2 = (*job2)["drvPath"]; + job["constituents"].push_back(drvPath2); + } + } else { + std::string drvPath = job["drvPath"]; + auto drv = readDerivation(*store, drvPath); + + for (std::string jobName2 : *named) { + auto job2 = state->jobs.find(jobName2); + if (job2 == state->jobs.end()) + throw Error("aggregate job '%s' references non-existent job '%s'", jobName, jobName2); + std::string drvPath2 = (*job2)["drvPath"]; + auto drv2 = readDerivation(*store, drvPath2); + job["constituents"].push_back(drvPath2); + drv.inputDrvs[store->parseStorePath(drvPath2)] = {drv2.outputs.begin()->first}; + } + + std::string drvName(store->parseStorePath(drvPath).name()); + assert(hasSuffix(drvName, drvExtension)); + drvName.resize(drvName.size() - drvExtension.size()); + auto h = hashDerivationModulo(*store, drv, true); + 
auto outPath = store->makeOutputPath("out", h, drvName); + drv.env["out"] = store->printStorePath(outPath); + drv.outputs.insert_or_assign("out", DerivationOutput(outPath.clone(), "", "")); + auto newDrvPath = store->printStorePath(writeDerivation(store, drv, drvName)); + + debug("rewrote aggregate derivation %s -> %s", drvPath, newDrvPath); + + job["drvPath"] = newDrvPath; + job["outputs"]["out"] = store->printStorePath(outPath); } - if (status != 0) - throw Exit(WIFEXITED(status) ? WEXITSTATUS(status) : 99); + job.erase("namedConstituents"); + } - maxHeapSize += 64 * 1024 * 1024; - - lastAttrPath = drainFD(pipe.readSide.get()); - } while (lastAttrPath != ""); + std::cout << state->jobs.dump(2) << "\n"; }); } From eb5873ae539affe3e91e04928ab4457446b377b3 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 20 Feb 2020 11:19:45 +0100 Subject: [PATCH 31/52] Fix build --- release.nix | 6 ++---- src/hydra-eval-jobs/hydra-eval-jobs.cc | 12 ++++++------ 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/release.nix b/release.nix index ce07010a..a3e2ffb5 100644 --- a/release.nix +++ b/release.nix @@ -1,5 +1,5 @@ { hydraSrc ? builtins.fetchGit ./. -, nixpkgs ? builtins.fetchGit { url = https://github.com/NixOS/nixpkgs-channels.git; ref = "nixos-19.09-small"; } +, nixpkgs ? builtins.fetchTarball https://github.com/NixOS/nixpkgs/archive/release-19.09.tar.gz , officialRelease ? false , shell ? 
false }: @@ -154,9 +154,7 @@ rec { preConfigure = "autoreconf -vfi"; - NIX_LDFLAGS = [ - "-lpthread" - ]; + NIX_LDFLAGS = [ "-lpthread" ]; enableParallelBuilding = true; diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index 1514cf4b..deffaeae 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -109,7 +109,7 @@ static void worker( nlohmann::json reply; try { - auto v = findAlongAttrPath(state, attrPath, autoArgs, *vRoot).first; + auto v = findAlongAttrPath(state, attrPath, autoArgs, *vRoot); state.forceValue(*v); @@ -138,23 +138,23 @@ static void worker( /* If this is an aggregate, then get its constituents. */ auto a = v->attrs->get(state.symbols.create("_hydraAggregate")); - if (a && state.forceBool(*a->value, *a->pos)) { + if (a && state.forceBool(*(*a)->value, *(*a)->pos)) { auto a = v->attrs->get(state.symbols.create("constituents")); if (!a) throw EvalError("derivation must have a ‘constituents’ attribute"); PathSet context; - state.coerceToString(*a->pos, *a->value, context, true, false); + state.coerceToString(*(*a)->pos, *(*a)->value, context, true, false); for (auto & i : context) if (i.at(0) == '!') { size_t index = i.find("!", 1); job["constituents"].push_back(string(i, index + 1)); } - state.forceList(*a->value, *a->pos); - for (unsigned int n = 0; n < a->value->listSize(); ++n) { - auto v = a->value->listElems()[n]; + state.forceList(*(*a)->value, *(*a)->pos); + for (unsigned int n = 0; n < (*a)->value->listSize(); ++n) { + auto v = (*a)->value->listElems()[n]; state.forceValue(*v); if (v->type == tString) job["namedConstituents"].push_back(state.forceStringNoCtx(*v)); From eaa65f51f4e6a63719f5a9f1315a3694a35d127e Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 3 Mar 2020 18:17:21 -0500 Subject: [PATCH 32/52] hydra-evaluator: make the logic of the scheduler easier to read --- src/hydra-evaluator/hydra-evaluator.cc | 33 ++++++++++++++++++++++---- 1 
file changed, 28 insertions(+), 5 deletions(-) diff --git a/src/hydra-evaluator/hydra-evaluator.cc b/src/hydra-evaluator/hydra-evaluator.cc index 091a4e9c..e9103b84 100644 --- a/src/hydra-evaluator/hydra-evaluator.cc +++ b/src/hydra-evaluator/hydra-evaluator.cc @@ -129,19 +129,42 @@ struct Evaluator childStarted.notify_one(); } + bool shouldEvaluate(Jobset & jobset) + { + if (jobset.pid != -1) { + // Already running. + return false; + } + + if (jobset.triggerTime == std::numeric_limits::max()) { + // An evaluation of this Jobset is requested + return true; + } + + if (jobset.checkInterval <= 0) { + // Automatic scheduling is disabled. We allow requested + // evaluations, but never schedule start one. + return false; + } + + + if (jobset.lastCheckedTime + jobset.checkInterval <= time(0)) { + // Time to schedule a fresh evaluation + return true; + } + + return false; + } + void startEvals(State & state) { std::vector sorted; - time_t now = time(0); - /* Filter out jobsets that have been evaluated recently and have not been triggered. */ for (auto i = state.jobsets.begin(); i != state.jobsets.end(); ++i) if (evalOne || - (i->second.pid == -1 && - (i->second.triggerTime != std::numeric_limits::max() || - (i->second.checkInterval > 0 && i->second.lastCheckedTime + i->second.checkInterval <= now)))) + (i->second.evaluation_style && shouldEvaluate(i->second))) sorted.push_back(i); /* Put jobsets in order of ascending trigger time, last checked From 5fae9d96a25c658e4b3ea4f1c121b8d815ba6492 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 3 Mar 2020 19:53:18 -0500 Subject: [PATCH 33/52] hydra-evaluator: add a 'ONE_AT_A_TIME' evaluator style In the past, jobsets which are automatically evaluated are evaluated regularly, on a schedule. This schedule means a new evaluation is created every checkInterval seconds (assuming something changed.) This model works well for architectures where our build farm can easily keep up with demand. 
This commit adds a new type of evaluation, called ONE_AT_A_TIME, which only schedules a new evaluation if the previous evaluation of the jobset has no unfinished builds. This model of evaluation lets us have 'low-tier' architectures. For example, we could now have a jobset for ARMv7l builds, where the buildfarm only has a single, underpowered ARMv7l builder. Configuring that jobset as ONE_AT_A_TIME will create an evaluation and then won't schedule another evaluation until every job of the existing evaluation is complete. This way, the cache will have a complete collection of pre-built software for some commits, but the underpowered architecture will never become backlogged in ancient revisions. --- src/hydra-evaluator/hydra-evaluator.cc | 87 ++++++++++++++++++++++++-- src/lib/Hydra/Controller/Jobset.pm | 2 +- src/root/edit-jobset.tt | 1 + src/root/jobset.tt | 2 +- src/sql/hydra.sql | 2 +- 5 files changed, 86 insertions(+), 8 deletions(-) diff --git a/src/hydra-evaluator/hydra-evaluator.cc b/src/hydra-evaluator/hydra-evaluator.cc index e9103b84..364a5351 100644 --- a/src/hydra-evaluator/hydra-evaluator.cc +++ b/src/hydra-evaluator/hydra-evaluator.cc @@ -15,6 +15,13 @@ using namespace nix; typedef std::pair JobsetName; +enum class EvaluationStyle +{ + SCHEDULE = 1, + ONESHOT = 2, + ONE_AT_A_TIME = 3, +}; + struct Evaluator { std::unique_ptr config; @@ -24,6 +31,7 @@ struct Evaluator struct Jobset { JobsetName name; + std::optional evaluation_style; time_t lastCheckedTime, triggerTime; int checkInterval; Pid pid; @@ -60,7 +68,7 @@ struct Evaluator pqxx::work txn(*conn); auto res = txn.parameterized - ("select project, j.name, lastCheckedTime, triggerTime, checkInterval from Jobsets j join Projects p on j.project = p.name " + ("select project, j.name, lastCheckedTime, triggerTime, checkInterval, j.enabled as jobset_enabled from Jobsets j join Projects p on j.project = p.name " "where j.enabled != 0 and p.enabled != 0").exec(); auto state(state_.lock()); @@ -78,6 +86,17 
@@ struct Evaluator jobset.lastCheckedTime = row["lastCheckedTime"].as(0); jobset.triggerTime = row["triggerTime"].as(notTriggered); jobset.checkInterval = row["checkInterval"].as(); + switch (row["jobset_enabled"].as(0)) { + case 1: + jobset.evaluation_style = EvaluationStyle::SCHEDULE; + break; + case 2: + jobset.evaluation_style = EvaluationStyle::ONESHOT; + break; + case 3: + jobset.evaluation_style = EvaluationStyle::ONE_AT_A_TIME; + break; + } seen.insert(name); } @@ -133,24 +152,82 @@ struct Evaluator { if (jobset.pid != -1) { // Already running. + debug("shouldEvaluate %s:%s? no: already running", + jobset.name.first, jobset.name.second); return false; } - if (jobset.triggerTime == std::numeric_limits::max()) { + if (jobset.triggerTime != std::numeric_limits::max()) { // An evaluation of this Jobset is requested + debug("shouldEvaluate %s:%s? yes: requested", + jobset.name.first, jobset.name.second); return true; } if (jobset.checkInterval <= 0) { // Automatic scheduling is disabled. We allow requested // evaluations, but never schedule start one. + debug("shouldEvaluate %s:%s? no: checkInterval <= 0", + jobset.name.first, jobset.name.second); return false; } - if (jobset.lastCheckedTime + jobset.checkInterval <= time(0)) { - // Time to schedule a fresh evaluation - return true; + // Time to schedule a fresh evaluation. If the jobset + // is a ONE_AT_A_TIME jobset, ensure the previous jobset + // has no remaining, unfinished work. + + auto conn(dbPool.get()); + + pqxx::work txn(*conn); + + if (jobset.evaluation_style == EvaluationStyle::ONE_AT_A_TIME) { + auto evaluation_res = txn.parameterized + ("select id from JobsetEvals " + "where project = $1 and jobset = $2 " + "order by id desc limit 1") + (jobset.name.first) + (jobset.name.second) + .exec(); + + if (evaluation_res.empty()) { + // First evaluation, so allow scheduling. + debug("shouldEvaluate(one-at-a-time) %s:%s? 
yes: no prior eval", + jobset.name.first, jobset.name.second); + return true; + } + + auto evaluation_id = evaluation_res[0][0].as(); + + auto unfinished_build_res = txn.parameterized + ("select id from Builds " + "join JobsetEvalMembers " + " on (JobsetEvalMembers.build = Builds.id) " + "where JobsetEvalMembers.eval = $1 " + " and builds.finished = 0 " + " limit 1") + (evaluation_id) + .exec(); + + // If the previous evaluation has no unfinished builds + // schedule! + if (unfinished_build_res.empty()) { + debug("shouldEvaluate(one-at-a-time) %s:%s? yes: no unfinished builds", + jobset.name.first, jobset.name.second); + return true; + } else { + debug("shouldEvaluate(one-at-a-time) %s:%s? no: at least one unfinished build", + jobset.name.first, jobset.name.second); + return false; + } + + + } else { + // EvaluationStyle::ONESHOT, EvaluationStyle::SCHEDULED + debug("shouldEvaluate(oneshot/scheduled) %s:%s? yes: checkInterval elapsed", + jobset.name.first, jobset.name.second); + return true; + } } return false; diff --git a/src/lib/Hydra/Controller/Jobset.pm b/src/lib/Hydra/Controller/Jobset.pm index 91e21dd4..5ce4aab4 100644 --- a/src/lib/Hydra/Controller/Jobset.pm +++ b/src/lib/Hydra/Controller/Jobset.pm @@ -226,7 +226,7 @@ sub updateJobset { my ($nixExprPath, $nixExprInput) = nixExprPathFromParams $c; my $enabled = int($c->stash->{params}->{enabled}); - die if $enabled < 0 || $enabled > 2; + die if $enabled < 0 || $enabled > 3; my $shares = int($c->stash->{params}->{schedulingshares} // 1); error($c, "The number of scheduling shares must be positive.") if $shares <= 0; diff --git a/src/root/edit-jobset.tt b/src/root/edit-jobset.tt index 6c380a3a..35ac668f 100644 --- a/src/root/edit-jobset.tt +++ b/src/root/edit-jobset.tt @@ -68,6 +68,7 @@ + diff --git a/src/root/jobset.tt b/src/root/jobset.tt index 9cf1202a..50be0f65 100644 --- a/src/root/jobset.tt +++ b/src/root/jobset.tt @@ -129,7 +129,7 @@ - + diff --git a/src/sql/hydra.sql b/src/sql/hydra.sql index 
8144dd30..a5fdc802 100644 --- a/src/sql/hydra.sql +++ b/src/sql/hydra.sql @@ -61,7 +61,7 @@ create table Jobsets ( errorTime integer, -- timestamp associated with errorMsg lastCheckedTime integer, -- last time the evaluator looked at this jobset triggerTime integer, -- set if we were triggered by a push event - enabled integer not null default 1, -- 0 = disabled, 1 = enabled, 2 = one-shot + enabled integer not null default 1, -- 0 = disabled, 1 = enabled, 2 = one-shot, 3 = one-at-a-time enableEmail integer not null default 1, hidden integer not null default 0, emailOverride text not null, From 113a312f6717ae81327b4d5bb91a16ae06e0933e Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 3 Mar 2020 22:32:13 -0500 Subject: [PATCH 34/52] handleDeclarativeJobsetBuild: handle errors from readNixFile --- src/lib/Hydra/Helper/AddBuilds.pm | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/lib/Hydra/Helper/AddBuilds.pm b/src/lib/Hydra/Helper/AddBuilds.pm index 40375c6b..296afbc5 100644 --- a/src/lib/Hydra/Helper/AddBuilds.pm +++ b/src/lib/Hydra/Helper/AddBuilds.pm @@ -68,8 +68,14 @@ sub handleDeclarativeJobsetBuild { my $id = $build->id; die "Declarative jobset build $id failed" unless $build->buildstatus == 0; my $declPath = ($build->buildoutputs)[0]->path; - my $declText = readNixFile($declPath) - or die "Couldn't read declarative specification file $declPath: $!"; + my $declText = eval { + readNixFile($declPath) + }; + if ($@) { + print STDERR "ERROR: failed to readNixFile $declPath: ", $@, "\n"; + die; + } + my $declSpec = decode_json($declText); txn_do($db, sub { my @kept = keys %$declSpec; From 117b9ecef18e4bf63b84a9cce0b0d3ae7d8910e9 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 3 Mar 2020 22:36:21 -0500 Subject: [PATCH 35/52] =?UTF-8?q?Nix.pm:=20readNixFile:=20pass=20=C2=AB--e?= =?UTF-8?q?xperimental-features=20nix-command=C2=BB?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 
8bit Declarative jobsets were broken by the Nix update, causing nix cat-file to break silently. This commit restores declarative jobsets, based on top of a commit making it easier to see what broke. --- src/lib/Hydra/Helper/Nix.pm | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lib/Hydra/Helper/Nix.pm b/src/lib/Hydra/Helper/Nix.pm index 8ce284ad..5034c81b 100644 --- a/src/lib/Hydra/Helper/Nix.pm +++ b/src/lib/Hydra/Helper/Nix.pm @@ -509,7 +509,8 @@ sub getStoreUri { # Read a file from the (possibly remote) nix store sub readNixFile { my ($path) = @_; - return grab(cmd => ["nix", "cat-store", "--store", getStoreUri(), "$path"]); + return grab(cmd => ["nix", "--experimental-features", "nix-command", + "cat-store", "--store", getStoreUri(), "$path"]); } From 994430b94bb3d237ee8380a0942c58a44dc94c06 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 3 Mar 2020 22:46:32 -0500 Subject: [PATCH 36/52] treewide: allow `nix` command --- src/lib/Hydra/Controller/Build.pm | 6 ++++-- src/script/hydra-eval-jobset | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/lib/Hydra/Controller/Build.pm b/src/lib/Hydra/Controller/Build.pm index 22bfd98e..52cb71dd 100644 --- a/src/lib/Hydra/Controller/Build.pm +++ b/src/lib/Hydra/Controller/Build.pm @@ -193,7 +193,8 @@ sub checkPath { sub serveFile { my ($c, $path) = @_; - my $res = run(cmd => ["nix", "ls-store", "--store", getStoreUri(), "--json", "$path"]); + my $res = run(cmd => ["nix", "--experimental-features", "nix-command", + "ls-store", "--store", getStoreUri(), "--json", "$path"]); if ($res->{status}) { notFound($c, "File '$path' does not exist.") if $res->{stderr} =~ /does not exist/; @@ -217,7 +218,8 @@ sub serveFile { elsif ($ls->{type} eq "regular") { - $c->stash->{'plain'} = { data => grab(cmd => ["nix", "cat-store", "--store", getStoreUri(), "$path"]) }; + $c->stash->{'plain'} = { data => grab(cmd => ["nix", "--experimental-features", "nix-command", + "cat-store", 
"--store", getStoreUri(), "$path"]) }; # Detect MIME type. Borrowed from Catalyst::Plugin::Static::Simple. my $type = "text/plain"; diff --git a/src/script/hydra-eval-jobset b/src/script/hydra-eval-jobset index cdb09b74..53a16f38 100755 --- a/src/script/hydra-eval-jobset +++ b/src/script/hydra-eval-jobset @@ -82,7 +82,7 @@ sub getPath { my $substituter = $config->{eval_substituter}; - system("nix", "copy", "--from", $substituter, "--", $path) + system("nix", "--experimental-features", "nix-command", "copy", "--from", $substituter, "--", $path) if defined $substituter; return isValidPath($path); From 69a6f3448a1cb4252d3468b9bdc7c7e171847c92 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 4 Mar 2020 15:16:04 +0100 Subject: [PATCH 37/52] Fix calling job functions Fixes #718. --- src/hydra-eval-jobs/hydra-eval-jobs.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index deffaeae..f27fde1d 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -109,9 +109,10 @@ static void worker( nlohmann::json reply; try { - auto v = findAlongAttrPath(state, attrPath, autoArgs, *vRoot); + auto vTmp = findAlongAttrPath(state, attrPath, autoArgs, *vRoot); - state.forceValue(*v); + auto v = state.allocValue(); + state.autoCallFunction(autoArgs, *vTmp, *v); if (auto drv = getDerivation(state, *v, false)) { From 123bee1db599818f34f880a756a44339c57c7ade Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Wed, 4 Mar 2020 15:16:26 +0100 Subject: [PATCH 38/52] Restore job type checking --- src/hydra-eval-jobs/hydra-eval-jobs.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index f27fde1d..4cbe5a1c 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -194,6 +194,11 @@ static void worker( reply["attrs"] = 
std::move(attrs); } + else if (v->type == tNull) + ; + + else throw TypeError("attribute '%s' is %s, which is not supported", attrPath, showType(*v)); + } catch (EvalError & e) { reply["error"] = filterANSIEscapes(e.msg(), true); } From be0ec2d22332d382781d7f45f474b57ad0f5c411 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 13 Mar 2020 16:17:48 +0100 Subject: [PATCH 39/52] Remove style.css https://github.com/NixOS/nix/issues/3392 --- doc/manual/Makefile.am | 3 +- doc/manual/style.css | 256 ----------------------------------------- 2 files changed, 1 insertion(+), 258 deletions(-) delete mode 100644 doc/manual/style.css diff --git a/doc/manual/Makefile.am b/doc/manual/Makefile.am index 353f5ee1..10c8f6ee 100644 --- a/doc/manual/Makefile.am +++ b/doc/manual/Makefile.am @@ -3,14 +3,13 @@ DOCBOOK_FILES = installation.xml introduction.xml manual.xml projects.xml hackin EXTRA_DIST = $(DOCBOOK_FILES) xsltproc_opts = \ - --param html.stylesheet \'style.css\' \ --param callout.graphics.extension \'.gif\' \ --param section.autolabel 1 \ --param section.label.includes.component.label 1 # Include the manual in the tarball. -dist_html_DATA = manual.html style.css +dist_html_DATA = manual.html # Embed Docbook's callout images in the distribution. EXTRA_DIST += images diff --git a/doc/manual/style.css b/doc/manual/style.css deleted file mode 100644 index 3a0c8fa1..00000000 --- a/doc/manual/style.css +++ /dev/null @@ -1,256 +0,0 @@ -/* Copied from http://bakefile.sourceforge.net/, which appears - licensed under the GNU GPL. 
*/ - - -/*************************************************************************** - Basic headers and text: - ***************************************************************************/ - -body -{ - font-family: "Nimbus Sans L", sans-serif; - background: white; - margin: 2em 1em 2em 1em; -} - -h1, h2, h3, h4 -{ - color: #005aa0; -} - -h1 /* title */ -{ - font-size: 200%; -} - -h2 /* chapters, appendices, subtitle */ -{ - font-size: 180%; -} - -/* Extra space between chapters, appendices. */ -div.chapter > div.titlepage h2, div.appendix > div.titlepage h2 -{ - margin-top: 1.5em; -} - -div.section > div.titlepage h2 /* sections */ -{ - font-size: 150%; - margin-top: 1.5em; -} - -h3 /* subsections */ -{ - font-size: 125%; -} - -div.simplesect h2 -{ - font-size: 110%; -} - -div.appendix h3 -{ - font-size: 150%; - margin-top: 1.5em; -} - -div.refnamediv h2, div.refsynopsisdiv h2, div.refsection h2 /* refentry parts */ -{ - margin-top: 1.4em; - font-size: 125%; -} - -div.refsection h3 -{ - font-size: 110%; -} - - -/*************************************************************************** - Examples: - ***************************************************************************/ - -div.example -{ - border: 1px solid #b0b0b0; - padding: 6px 6px; - margin-left: 1.5em; - margin-right: 1.5em; - background: #f4f4f8; - border-radius: 0.4em; - box-shadow: 0.4em 0.4em 0.5em #e0e0e0; -} - -div.example p.title -{ - margin-top: 0em; -} - -div.example pre -{ - box-shadow: none; -} - - -/*************************************************************************** - Screen dumps: - ***************************************************************************/ - -pre.screen, pre.programlisting -{ - border: 1px solid #b0b0b0; - padding: 3px 3px; - margin-left: 1.5em; - margin-right: 1.5em; - color: #600000; - background: #f4f4f8; - font-family: monospace; - border-radius: 0.4em; - box-shadow: 0.4em 0.4em 0.5em #e0e0e0; -} - -div.example pre.programlisting -{ - border: 0px; - padding: 0 
0; - margin: 0 0 0 0; -} - - -/*************************************************************************** - Notes, warnings etc: - ***************************************************************************/ - -.note, .warning -{ - border: 1px solid #b0b0b0; - padding: 3px 3px; - margin-left: 1.5em; - margin-right: 1.5em; - margin-bottom: 1em; - padding: 0.3em 0.3em 0.3em 0.3em; - background: #fffff5; - border-radius: 0.4em; - box-shadow: 0.4em 0.4em 0.5em #e0e0e0; -} - -div.note, div.warning -{ - font-style: italic; -} - -div.note h3, div.warning h3 -{ - color: red; - font-size: 100%; - padding-right: 0.5em; - display: inline; -} - -div.note p, div.warning p -{ - margin-bottom: 0em; -} - -div.note h3 + p, div.warning h3 + p -{ - display: inline; -} - -div.note h3 -{ - color: blue; - font-size: 100%; -} - -div.navfooter * -{ - font-size: 90%; -} - - -/*************************************************************************** - Links colors and highlighting: - ***************************************************************************/ - -a { text-decoration: none; } -a:hover { text-decoration: underline; } -a:link { color: #0048b3; } -a:visited { color: #002a6a; } - - -/*************************************************************************** - Table of contents: - ***************************************************************************/ - -div.toc -{ - font-size: 90%; -} - -div.toc dl -{ - margin-top: 0em; - margin-bottom: 0em; -} - - -/*************************************************************************** - Special elements: - ***************************************************************************/ - -tt, code -{ - color: #400000; -} - -.term -{ - font-weight: bold; - -} - -div.variablelist dd p, div.glosslist dd p -{ - margin-top: 0em; -} - -div.variablelist dd, div.glosslist dd -{ - margin-left: 1.5em; -} - -div.glosslist dt -{ - font-style: italic; -} - -.varname -{ - color: #400000; -} - -span.command strong -{ - font-weight: normal; - color: 
#400000; -} - -div.calloutlist table -{ - box-shadow: none; -} - -table -{ - border-collapse: collapse; - box-shadow: 0.4em 0.4em 0.5em #e0e0e0; -} - -div.affiliation -{ - font-style: italic; -} - From 5ae8b54d258c15277d9a53c54934e4ddb4fd22be Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 13 Mar 2020 19:56:55 +0100 Subject: [PATCH 40/52] README.md: Link to the latest manual (cherry picked from commit 455611b0b4e97290a858687142805babc9929673) --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 92d9e2fe..568ef4c3 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,11 @@ # Hydra -[Hydra](https://nixos.org/hydra/) is a continuous integration system based -on the Nix package manager. For more information, see the -[manual](http://nixos.org/hydra/manual/). +Hydra is a continuous integration system based on the Nix package +manager. For more information, see the +[manual](https://hydra.nixos.org/job/hydra/master/manual/latest/download-by-type/doc/manual). For development see -[hacking instructions](http://nixos.org/hydra/manual/#chap-hacking). +[hacking instructions](https://hydra.nixos.org/job/hydra/master/manual/latest/download-by-type/doc/manual#chap-hacking). --- From 73694087a088ed4481b4ab268a03351b1bcaac3c Mon Sep 17 00:00:00 2001 From: Maximilian Bosch Date: Sat, 21 Mar 2020 21:08:33 +0100 Subject: [PATCH 41/52] Use the stopsign as icon for timeouts and exceeded log-limits When I browse failed builds in a jobset-eval on Hydra, I regularly mistake actual build-failures with temporary issues like timeouts (that probably disappear at the next eval). To prevent this kind of issue, I figured that using the stopsign-svg for builds with timeouts or exceeded log-limits is a reasonable choice for the following reasons: * A user can now distinguish between actual build-errors (like compilation-failures or oversized outputs) and (usually) temporary issues (like a bloated log or a timeout). 
* The stopsign is also used for aborted jobs that are shown in a different tab and can't be confused with timeouts for that reason. --- src/root/common.tt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/root/common.tt b/src/root/common.tt index ed59da67..83b1c736 100644 --- a/src/root/common.tt +++ b/src/root/common.tt @@ -229,9 +229,9 @@ BLOCK renderBuildStatusIcon; [% ELSIF buildstatus == 6 %] Failed with output [% ELSIF buildstatus == 7 %] - Timed out + Timed out [% ELSIF buildstatus == 10 %] - Log limit exceeded + Log limit exceeded [% ELSIF buildstatus == 11 %] Output size limit exceeded [% ELSIF buildstatus == 12 %] From 12cc46cdb36321acd4c982429a86eb0f8f3cc969 Mon Sep 17 00:00:00 2001 From: Graham Christensen Date: Tue, 24 Mar 2020 11:22:14 -0400 Subject: [PATCH 42/52] fixup: hydra-init: correct reference to hydra-backfill-ids --- src/script/hydra-init | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/script/hydra-init b/src/script/hydra-init index 1a5f2439..0b564ce6 100755 --- a/src/script/hydra-init +++ b/src/script/hydra-init @@ -51,7 +51,7 @@ WARNING: Schema version 62 and 63 make nullable jobset_id fields on migration will take many hours. Because of that, the migration is not automatic, and must be performed manually. 
- To backfill these IDs, run: hydra-fill-ids + To backfill these IDs, run: hydra-backfill-ids QUOTE } From 956f009672679c5e5d8b152ba8b642442f8c9492 Mon Sep 17 00:00:00 2001 From: Nikola Knezevic Date: Thu, 26 Mar 2020 10:02:41 +0100 Subject: [PATCH 43/52] Add documentation for SlackNotification plugin --- src/lib/Hydra/Plugin/SlackNotification.pm | 50 ++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) diff --git a/src/lib/Hydra/Plugin/SlackNotification.pm b/src/lib/Hydra/Plugin/SlackNotification.pm index 96933c51..97810516 100644 --- a/src/lib/Hydra/Plugin/SlackNotification.pm +++ b/src/lib/Hydra/Plugin/SlackNotification.pm @@ -7,6 +7,53 @@ use LWP::UserAgent; use Hydra::Helper::CatalystUtils; use JSON; +=head1 NAME + +SlackNotification - hydra-notify plugin for sending Slack notifications about +build results + +=head1 DESCRIPTION + +This plugin reports build statuses to various Slack channels. One can configure +which builds are reported to which channels, and whether reports should be on +state change (regressions and improvements), or for each build. + +=head1 CONFIGURATION + +The module is configured using the C block in Hydra's config file. There +can be multiple such blocks in the config file, each configuring different (or +even the same) set of builds and how they report to Slack channels. + +The following entries are recognized in the C block: + +=over 4 + +=item jobs + +A pattern for job names. All builds whose job name matches this pattern will +emit a message to the designated Slack channel (see C). The pattern will +match the whole name, thus leaving this field empty will result in no +notifications being sent. To match on all builds, use C<.*>. + +=item url + +The URL to a L. + +Slack administrators have to prepare one incoming webhook for each channel. This +URL should be treated as secret, as anyone knowing the URL could post a message +to the Slack workspace (or more precisely, the channel behind it). 
+ +=item force + +(Optional) An I indicating whether to report on every build or only on +changes in the status. If not provided, defaults to 0, that is, sending reports +only when build status changes from success to failure, and vice-versa. Any +other value results in reporting on every build. + +=back + +=cut + sub isEnabled { my ($self) = @_; return defined $self->{config}->{slack}; @@ -44,9 +91,10 @@ sub buildFinished { my $jobName = showJobName $b; foreach my $channel (@config) { - my $force = $channel->{force}; next unless $jobName =~ /^$channel->{jobs}$/; + my $force = $channel->{force}; + # If build is cancelled or aborted, do not send email. next if ! $force && ($b->buildstatus == 4 || $b->buildstatus == 3); From 986fde8888e185440da68fbd86232a52a2fa9363 Mon Sep 17 00:00:00 2001 From: Nikola Knezevic Date: Thu, 26 Mar 2020 10:42:26 +0100 Subject: [PATCH 44/52] Refactor code Extract the conditions before the loop, as they do not change due to channel definition. --- src/lib/Hydra/Plugin/SlackNotification.pm | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/lib/Hydra/Plugin/SlackNotification.pm b/src/lib/Hydra/Plugin/SlackNotification.pm index 97810516..3cd75173 100644 --- a/src/lib/Hydra/Plugin/SlackNotification.pm +++ b/src/lib/Hydra/Plugin/SlackNotification.pm @@ -87,20 +87,24 @@ sub buildFinished { # we send one aggregate message. my %channels; foreach my $b ($build, @{$dependents}) { - my $prevBuild = getPreviousBuild($b); my $jobName = showJobName $b; + my $buildStatus = $b->buildstatus; + my $cancelledOrAborted = $buildStatus == 4 || $buildStatus == 3; + + my $prevBuild = getPreviousBuild($b); + my $sameAsPrevious = defined $prevBuild && ($buildStatus == $prevBuild->buildstatus); foreach my $channel (@config) { next unless $jobName =~ /^$channel->{jobs}$/; my $force = $channel->{force}; - # If build is cancelled or aborted, do not send email. - next if ! 
$force && ($b->buildstatus == 4 || $b->buildstatus == 3); + # If build is cancelled or aborted, do not send Slack notification. + next if ! $force && $cancelledOrAborted; # If there is a previous (that is not cancelled or aborted) build - # with same buildstatus, do not send email. - next if ! $force && defined $prevBuild && ($b->buildstatus == $prevBuild->buildstatus); + # with same buildstatus, do not send Slack notification. + next if ! $force && $sameAsPrevious; $channels{$channel->{url}} //= { channel => $channel, builds => [] }; push @{$channels{$channel->{url}}->{builds}}, $b; From 1bee6e3d8a9c22a84f6b1d7590c14f59c8262110 Mon Sep 17 00:00:00 2001 From: Nikola Knezevic Date: Thu, 26 Mar 2020 10:43:17 +0100 Subject: [PATCH 45/52] Add debug logging This will help us track potential problems with the plugin. --- src/lib/Hydra/Plugin/SlackNotification.pm | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/lib/Hydra/Plugin/SlackNotification.pm b/src/lib/Hydra/Plugin/SlackNotification.pm index 3cd75173..a09fbe03 100644 --- a/src/lib/Hydra/Plugin/SlackNotification.pm +++ b/src/lib/Hydra/Plugin/SlackNotification.pm @@ -93,12 +93,18 @@ sub buildFinished { my $prevBuild = getPreviousBuild($b); my $sameAsPrevious = defined $prevBuild && ($buildStatus == $prevBuild->buildstatus); + my $prevBuildStatus = (defined $prevBuild) ? $prevBuild->buildstatus : -1; + my $prevBuildId = (defined $prevBuild) ? $prevBuild->id : -1; + + print STDERR "SlackNotification_Debug job name $jobName status $buildStatus (previous: $prevBuildStatus from $prevBuildId)\n"; foreach my $channel (@config) { next unless $jobName =~ /^$channel->{jobs}$/; my $force = $channel->{force}; + print STDERR "SlackNotification_Debug found match with '$channel->{jobs}' with force=$force\n"; + # If build is cancelled or aborted, do not send Slack notification. next if ! $force && $cancelledOrAborted; @@ -106,6 +112,7 @@ sub buildFinished { # with same buildstatus, do not send Slack notification. 
next if ! $force && $sameAsPrevious; + print STDERR "SlackNotification_Debug adding $jobName to the report list\n"; $channels{$channel->{url}} //= { channel => $channel, builds => [] }; push @{$channels{$channel->{url}}->{builds}}, $b; } @@ -145,6 +152,8 @@ sub buildFinished { $text .= join(" or ", scalar @x > 1 ? join(", ", @x[0..scalar @x - 2]) : (), $x[-1]); } + print STDERR "SlackNotification_Debug POSTing to url ending with: ${\substr $url, -8}\n"; + my $msg = { attachments => [{ fallback => "Job " . showJobName($build) . " build number " . $build->id . ": " . showStatus($build), From 76299b9174d7cba06427220166dc4707b43d407d Mon Sep 17 00:00:00 2001 From: Samuel Dionne-Riel Date: Mon, 30 Mar 2020 14:36:16 -0400 Subject: [PATCH 46/52] hydra-eval-jobs: Mirror eval errors in STDERR Otherwise, errors will not be shown to end-users, which makes debugging long evals pretty much impossible. --- src/hydra-eval-jobs/hydra-eval-jobs.cc | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/hydra-eval-jobs/hydra-eval-jobs.cc b/src/hydra-eval-jobs/hydra-eval-jobs.cc index 4cbe5a1c..de994b39 100644 --- a/src/hydra-eval-jobs/hydra-eval-jobs.cc +++ b/src/hydra-eval-jobs/hydra-eval-jobs.cc @@ -200,7 +200,12 @@ static void worker( else throw TypeError("attribute '%s' is %s, which is not supported", attrPath, showType(*v)); } catch (EvalError & e) { + // Transmits the error we got from the previous evaluation + // in the JSON output. reply["error"] = filterANSIEscapes(e.msg(), true); + // Don't forget to print it into the STDERR log, this is + // what's shown in the Hydra UI. + printError("error: %s", reply["error"]); } writeLine(to.get(), reply.dump()); @@ -286,6 +291,9 @@ int main(int argc, char * * argv) nlohmann::json err; err["error"] = e.what(); writeLine(to->get(), err.dump()); + // Don't forget to print it into the STDERR log, this is + // what's shown in the Hydra UI. 
+ printError("error: %s", err["error"]); } }, ProcessOptions { .allowVfork = false }); From 8b5900ac33ca4231b08a573fbd37e46ced74b903 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 31 Mar 2020 12:58:24 +0200 Subject: [PATCH 47/52] Remove outdated email address (cherry picked from commit 22a65d0269ce89f4962b781dbb78474ec2298c3c) --- configure.ac | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/configure.ac b/configure.ac index f99d4c9d..baff26c2 100644 --- a/configure.ac +++ b/configure.ac @@ -1,5 +1,4 @@ -AC_INIT([Hydra], [m4_esyscmd([echo -n $(cat ./version)$VERSION_SUFFIX])], - [nix-dev@cs.uu.nl], [hydra], [http://nixos.org/hydra/]) +AC_INIT([Hydra], [m4_esyscmd([echo -n $(cat ./version)$VERSION_SUFFIX])]) AC_CONFIG_AUX_DIR(config) AM_INIT_AUTOMAKE([foreign serial-tests]) From 68a59f34a0da49f55823616de2e6d744c826ebe5 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 26 Mar 2020 14:59:52 +0100 Subject: [PATCH 48/52] Remove tabs (cherry picked from commit 5b731004da89a9b94f4ea7945ea6538837a64e41) --- src/root/common.tt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/root/common.tt b/src/root/common.tt index 83b1c736..bc777ede 100644 --- a/src/root/common.tt +++ b/src/root/common.tt @@ -584,10 +584,10 @@ BLOCK renderJobsetOverview %] + INCLUDE renderDateTime timestamp = j.lastcheckedtime; + IF j.errormsg || j.fetcherrormsg; %] Error[% END; + ELSE; "-"; + END %] [% IF j.get_column('nrtotal') > 0 %] [% successrate = ( j.get_column('nrsucceeded') / j.get_column('nrtotal') )*100 %] [% IF j.get_column('nrscheduled') > 0 %] From 4417f9f260b17cf2d4098df2a6a4b858856b76ea Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 26 Mar 2020 15:00:04 +0100 Subject: [PATCH 49/52] Abort unsupported build steps If we don't see machine that supports a build step for 'max_unsupported_time' seconds, the step is aborted. 
The default is 0, which is appropriate for Hydra installations that don't provision missing machines dynamically. (cherry picked from commit f5cdbfe21d930db43d3812c7d8e87746d6378ef9) --- src/hydra-queue-runner/builder.cc | 194 ++++++++++--------- src/hydra-queue-runner/dispatcher.cc | 85 ++++++++ src/hydra-queue-runner/hydra-queue-runner.cc | 1 + src/hydra-queue-runner/state.hh | 20 +- 4 files changed, 208 insertions(+), 92 deletions(-) diff --git a/src/hydra-queue-runner/builder.cc b/src/hydra-queue-runner/builder.cc index a0fc01c3..0a005321 100644 --- a/src/hydra-queue-runner/builder.cc +++ b/src/hydra-queue-runner/builder.cc @@ -376,97 +376,8 @@ State::StepResult State::doBuildStep(nix::ref destStore, } } - } else { - - /* Register failure in the database for all Build objects that - directly or indirectly depend on this step. */ - - std::vector dependentIDs; - - while (true) { - /* Get the builds and steps that depend on this step. */ - std::set indirect; - { - auto steps_(steps.lock()); - std::set steps; - getDependents(step, indirect, steps); - - /* If there are no builds left, delete all referring - steps from ‘steps’. As for the success case, we can - be certain no new referrers can be added. */ - if (indirect.empty()) { - for (auto & s : steps) { - printMsg(lvlDebug, "finishing build step ‘%s’", - localStore->printStorePath(s->drvPath)); - steps_->erase(s->drvPath); - } - } - } - - if (indirect.empty() && stepFinished) break; - - /* Update the database. */ - { - auto mc = startDbUpdate(); - - pqxx::work txn(*conn); - - /* Create failed build steps for every build that - depends on this, except when this step is cached - and is the top-level of that build (since then it's - redundant with the build's isCachedBuild field). 
*/ - for (auto & build2 : indirect) { - if ((result.stepStatus == bsCachedFailure && build2->drvPath == step->drvPath) || - (result.stepStatus != bsCachedFailure && buildId == build2->id) || - build2->finishedInDB) - continue; - createBuildStep(txn, 0, build2->id, step, machine->sshName, - result.stepStatus, result.errorMsg, buildId == build2->id ? 0 : buildId); - } - - /* Mark all builds that depend on this derivation as failed. */ - for (auto & build2 : indirect) { - if (build2->finishedInDB) continue; - printMsg(lvlError, format("marking build %1% as failed") % build2->id); - txn.parameterized - ("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, isCachedBuild = $5, notificationPendingSince = $4 where id = $1 and finished = 0") - (build2->id) - ((int) (build2->drvPath != step->drvPath && result.buildStatus() == bsFailed ? bsDepFailed : result.buildStatus())) - (result.startTime) - (result.stopTime) - (result.stepStatus == bsCachedFailure ? 1 : 0).exec(); - nrBuildsDone++; - } - - /* Remember failed paths in the database so that they - won't be built again. */ - if (result.stepStatus != bsCachedFailure && result.canCache) - for (auto & path : step->drv->outputPaths()) - txn.parameterized("insert into FailedPaths values ($1)")(localStore->printStorePath(path)).exec(); - - txn.commit(); - } - - stepFinished = true; - - /* Remove the indirect dependencies from ‘builds’. This - will cause them to be destroyed. */ - for (auto & b : indirect) { - auto builds_(builds.lock()); - b->finishedInDB = true; - builds_->erase(b->id); - dependentIDs.push_back(b->id); - if (buildOne == b->id) quit = true; - } - } - - /* Send notification about this build and its dependents. */ - { - pqxx::work txn(*conn); - notifyBuildFinished(txn, buildId, dependentIDs); - txn.commit(); - } - } + } else + failStep(*conn, step, buildId, result, machine, stepFinished, quit); // FIXME: keep stats about aborted steps? 
nrStepsDone++; @@ -482,6 +393,107 @@ State::StepResult State::doBuildStep(nix::ref destStore, } +void State::failStep( + Connection & conn, + Step::ptr step, + BuildID buildId, + const RemoteResult & result, + Machine::ptr machine, + bool & stepFinished, + bool & quit) +{ + /* Register failure in the database for all Build objects that + directly or indirectly depend on this step. */ + + std::vector dependentIDs; + + while (true) { + /* Get the builds and steps that depend on this step. */ + std::set indirect; + { + auto steps_(steps.lock()); + std::set steps; + getDependents(step, indirect, steps); + + /* If there are no builds left, delete all referring + steps from ‘steps’. As for the success case, we can + be certain no new referrers can be added. */ + if (indirect.empty()) { + for (auto & s : steps) { + printMsg(lvlDebug, "finishing build step ‘%s’", + localStore->printStorePath(s->drvPath)); + steps_->erase(s->drvPath); + } + } + } + + if (indirect.empty() && stepFinished) break; + + /* Update the database. */ + { + auto mc = startDbUpdate(); + + pqxx::work txn(conn); + + /* Create failed build steps for every build that + depends on this, except when this step is cached + and is the top-level of that build (since then it's + redundant with the build's isCachedBuild field). */ + for (auto & build : indirect) { + if ((result.stepStatus == bsCachedFailure && build->drvPath == step->drvPath) || + ((result.stepStatus != bsCachedFailure && result.stepStatus != bsUnsupported) && buildId == build->id) || + build->finishedInDB) + continue; + createBuildStep(txn, + 0, build->id, step, machine ? machine->sshName : "", + result.stepStatus, result.errorMsg, buildId == build->id ? 0 : buildId); + } + + /* Mark all builds that depend on this derivation as failed. 
*/ + for (auto & build : indirect) { + if (build->finishedInDB) continue; + printMsg(lvlError, format("marking build %1% as failed") % build->id); + txn.parameterized + ("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, isCachedBuild = $5, notificationPendingSince = $4 where id = $1 and finished = 0") + (build->id) + ((int) (build->drvPath != step->drvPath && result.buildStatus() == bsFailed ? bsDepFailed : result.buildStatus())) + (result.startTime) + (result.stopTime) + (result.stepStatus == bsCachedFailure ? 1 : 0).exec(); + nrBuildsDone++; + } + + /* Remember failed paths in the database so that they + won't be built again. */ + if (result.stepStatus != bsCachedFailure && result.canCache) + for (auto & path : step->drv->outputPaths()) + txn.parameterized("insert into FailedPaths values ($1)")(localStore->printStorePath(path)).exec(); + + txn.commit(); + } + + stepFinished = true; + + /* Remove the indirect dependencies from ‘builds’. This + will cause them to be destroyed. */ + for (auto & b : indirect) { + auto builds_(builds.lock()); + b->finishedInDB = true; + builds_->erase(b->id); + dependentIDs.push_back(b->id); + if (buildOne == b->id) quit = true; + } + } + + /* Send notification about this build and its dependents. */ + { + pqxx::work txn(conn); + notifyBuildFinished(txn, buildId, dependentIDs); + txn.commit(); + } +} + + void State::addRoot(const StorePath & storePath) { auto root = rootsDir + "/" + std::string(storePath.to_string()); diff --git a/src/hydra-queue-runner/dispatcher.cc b/src/hydra-queue-runner/dispatcher.cc index 86596ff5..700fdd8e 100644 --- a/src/hydra-queue-runner/dispatcher.cc +++ b/src/hydra-queue-runner/dispatcher.cc @@ -300,6 +300,8 @@ system_time State::doDispatch() } while (keepGoing); + abortUnsupported(); + return sleepUntil; } @@ -314,6 +316,89 @@ void State::wakeDispatcher() } +void State::abortUnsupported() +{ + /* Make a copy of 'runnable' and 'machines' so we don't block them + very long. 
*/ + auto runnable2 = *runnable.lock(); + auto machines2 = *machines.lock(); + + system_time now = std::chrono::system_clock::now(); + auto now2 = time(0); + + std::unordered_set aborted; + + for (auto & wstep : runnable2) { + auto step(wstep.lock()); + if (!step) continue; + + bool supported = false; + for (auto & machine : machines2) { + if (machine.second->supportsStep(step)) { + step->state.lock()->lastSupported = now; + supported = true; + break; + } + } + + if (!supported + && std::chrono::duration_cast(now - step->state.lock()->lastSupported).count() >= maxUnsupportedTime) + { + printError("aborting unsupported build step '%s' (type '%s')", + localStore->printStorePath(step->drvPath), + step->systemType); + + aborted.insert(step); + + auto conn(dbPool.get()); + + std::set dependents; + std::set steps; + getDependents(step, dependents, steps); + + /* Maybe the step got cancelled. */ + if (dependents.empty()) continue; + + /* Find the build that has this step as the top-level (if + any). */ + Build::ptr build; + for (auto build2 : dependents) { + if (build2->drvPath == step->drvPath) + build = build2; + } + if (!build) build = *dependents.begin(); + + bool stepFinished = false; + bool quit = false; + + failStep( + *conn, step, build->id, + RemoteResult { + .stepStatus = bsUnsupported, + .errorMsg = fmt("unsupported system type '%s'", + step->systemType), + .startTime = now2, + .stopTime = now2, + }, + nullptr, stepFinished, quit); + + if (quit) exit(1); + } + } + + /* Clean up 'runnable'. 
*/ + { + auto runnable_(runnable.lock()); + for (auto i = runnable_->begin(); i != runnable_->end(); ) { + if (aborted.count(i->lock())) + i = runnable_->erase(i); + else + ++i; + } + } +} + + void Jobset::addStep(time_t startTime, time_t duration) { auto steps_(steps.lock()); diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index 3d5dad4d..f7ecf268 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -46,6 +46,7 @@ std::string getEnvOrDie(const std::string & key) State::State() : config(std::make_unique<::Config>()) + , maxUnsupportedTime(config->getIntOption("max_unsupported_time", 0)) , dbPool(config->getIntOption("max_db_connections", 128)) , memoryTokens(config->getIntOption("nar_buffer_size", getMemSize() / 2)) , maxOutputSize(config->getIntOption("max_output_size", 2ULL << 30)) diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index 180907e9..4d87a3a7 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -68,7 +68,7 @@ struct RemoteResult std::unique_ptr tokens; std::shared_ptr accessor; - BuildStatus buildStatus() + BuildStatus buildStatus() const { return stepStatus == bsCachedFailure ? bsFailed : stepStatus; } @@ -198,6 +198,10 @@ struct Step /* The time at which this step became runnable. */ system_time runnableSince; + + /* The time that we last saw a machine that supports this + step. */ + system_time lastSupported = std::chrono::system_clock::now(); }; std::atomic_bool finished{false}; // debugging @@ -303,6 +307,9 @@ private: const float retryBackoff = 3.0; const unsigned int maxParallelCopyClosure = 4; + /* Time in seconds before unsupported build steps are aborted. 
*/ + const unsigned int maxUnsupportedTime = 0; + nix::Path hydraData, logDir; bool useSubstitutes = false; @@ -483,6 +490,15 @@ private: Build::ptr referringBuild, Step::ptr referringStep, std::set & finishedDrvs, std::set & newSteps, std::set & newRunnable); + void failStep( + Connection & conn, + Step::ptr step, + BuildID buildId, + const RemoteResult & result, + Machine::ptr machine, + bool & stepFinished, + bool & quit); + Jobset::ptr createJobset(pqxx::work & txn, const std::string & projectName, const std::string & jobsetName); @@ -497,6 +513,8 @@ private: void wakeDispatcher(); + void abortUnsupported(); + void builder(MachineReservation::ptr reservation); /* Perform the given build step. Return true if the step is to be From ccd046ca3db27222b4aaf7644f9fdb2e5c7da328 Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 26 Mar 2020 15:26:12 +0100 Subject: [PATCH 50/52] Keep track of the number of unsupported steps (cherry picked from commit 45ffe578b695f9de101b30d44d46f12aa0654f10) --- src/hydra-queue-runner/dispatcher.cc | 7 +++++++ src/hydra-queue-runner/hydra-queue-runner.cc | 1 + src/hydra-queue-runner/state.hh | 1 + src/script/hydra-send-stats | 1 + 4 files changed, 10 insertions(+) diff --git a/src/hydra-queue-runner/dispatcher.cc b/src/hydra-queue-runner/dispatcher.cc index 700fdd8e..6dc7f700 100644 --- a/src/hydra-queue-runner/dispatcher.cc +++ b/src/hydra-queue-runner/dispatcher.cc @@ -328,6 +328,8 @@ void State::abortUnsupported() std::unordered_set aborted; + size_t count = 0; + for (auto & wstep : runnable2) { auto step(wstep.lock()); if (!step) continue; @@ -341,6 +343,9 @@ void State::abortUnsupported() } } + if (!supported) + count++; + if (!supported && std::chrono::duration_cast(now - step->state.lock()->lastSupported).count() >= maxUnsupportedTime) { @@ -396,6 +401,8 @@ void State::abortUnsupported() ++i; } } + + nrUnsupportedSteps = count; } diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc 
b/src/hydra-queue-runner/hydra-queue-runner.cc index f7ecf268..8d729bd1 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -523,6 +523,7 @@ void State::dumpStatus(Connection & conn, bool log) root.attr("nrStepsCopyingTo", nrStepsCopyingTo); root.attr("nrStepsCopyingFrom", nrStepsCopyingFrom); root.attr("nrStepsWaiting", nrStepsWaiting); + root.attr("nrUnsupportedSteps", nrUnsupportedSteps); root.attr("bytesSent", bytesSent); root.attr("bytesReceived", bytesReceived); root.attr("nrBuildsRead", nrBuildsRead); diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index 4d87a3a7..c702e8a3 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -355,6 +355,7 @@ private: counter nrStepsCopyingTo{0}; counter nrStepsCopyingFrom{0}; counter nrStepsWaiting{0}; + counter nrUnsupportedSteps{0}; counter nrRetries{0}; counter maxNrRetries{0}; counter totalStepTime{0}; // total time for steps, including closure copying diff --git a/src/script/hydra-send-stats b/src/script/hydra-send-stats index 2b8c550b..cf653865 100755 --- a/src/script/hydra-send-stats +++ b/src/script/hydra-send-stats @@ -34,6 +34,7 @@ sub sendQueueRunnerStats { gauge("hydra.queue.steps.unfinished", $json->{nrUnfinishedSteps}); gauge("hydra.queue.steps.finished", $json->{nrStepsDone}); gauge("hydra.queue.steps.retries", $json->{nrRetries}); + gauge("hydra.queue.steps.unsupported", $json->{nrUnsupportedSteps}); gauge("hydra.queue.steps.max_retries", $json->{maxNrRetries}); if ($json->{nrStepsDone}) { gauge("hydra.queue.steps.avg_total_time", $json->{avgStepTime}); From 9727892b61e89694211cf41359c0e07398890c1b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Thu, 26 Mar 2020 15:30:37 +0100 Subject: [PATCH 51/52] Don't spam the journal with hydra-queue-runner status dumps (cherry picked from commit 15ae932488512ba235ed2f6f841cc5eb56ba9314) --- src/hydra-queue-runner/hydra-queue-runner.cc | 13 
++++--------- src/hydra-queue-runner/state.hh | 5 +---- 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/src/hydra-queue-runner/hydra-queue-runner.cc b/src/hydra-queue-runner/hydra-queue-runner.cc index 8d729bd1..65d93a04 100644 --- a/src/hydra-queue-runner/hydra-queue-runner.cc +++ b/src/hydra-queue-runner/hydra-queue-runner.cc @@ -491,7 +491,7 @@ std::shared_ptr State::acquireGlobalLock() } -void State::dumpStatus(Connection & conn, bool log) +void State::dumpStatus(Connection & conn) { std::ostringstream out; @@ -672,11 +672,6 @@ void State::dumpStatus(Connection & conn, bool log) } } - if (log && time(0) >= lastStatusLogged + statusLogInterval) { - printMsg(lvlInfo, format("status: %1%") % out.str()); - lastStatusLogged = time(0); - } - { auto mc = startDbUpdate(); pqxx::work txn(conn); @@ -785,7 +780,7 @@ void State::run(BuildID buildOne) { auto conn(dbPool.get()); clearBusy(*conn, 0); - dumpStatus(*conn, false); + dumpStatus(*conn); } std::thread(&State::monitorMachinesFile, this).detach(); @@ -848,8 +843,8 @@ void State::run(BuildID buildOne) auto conn(dbPool.get()); receiver dumpStatus_(*conn, "dump_status"); while (true) { - conn->await_notification(statusLogInterval / 2 + 1, 0); - dumpStatus(*conn, true); + conn->await_notification(); + dumpStatus(*conn); } } catch (std::exception & e) { printMsg(lvlError, format("main thread: %1%") % e.what()); diff --git a/src/hydra-queue-runner/state.hh b/src/hydra-queue-runner/state.hh index c702e8a3..31c7fddf 100644 --- a/src/hydra-queue-runner/state.hh +++ b/src/hydra-queue-runner/state.hh @@ -420,9 +420,6 @@ private: size_t maxOutputSize; size_t maxLogSize; - time_t lastStatusLogged = 0; - const int statusLogInterval = 300; - /* Steps that were busy while we encounted a PostgreSQL error. These need to be cleared at a later time to prevent them from showing up as busy until the queue runner is restarted. */ @@ -546,7 +543,7 @@ private: has it. 
*/ std::shared_ptr acquireGlobalLock(); - void dumpStatus(Connection & conn, bool log); + void dumpStatus(Connection & conn); void addRoot(const nix::StorePath & storePath); From 53e1294825c3889235af2ca6ac9dbc9d6cb74a7a Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Tue, 31 Mar 2020 13:33:11 +0200 Subject: [PATCH 52/52] Fix Mercurial tests In Nixpkgs 20.03, Mercurial fails if PYTHONPATH is set: $ hg Traceback (most recent call last): File "/nix/store/q7s856v6nw4dffdrm9k3w38qs35i8kr3-mercurial-5.2.2/bin/..hg-wrapped-wrapped", line 37, in dispatch.run() File "/nix/store/bffdy7q3wi3qinflnvbdkigqj39zzynd-python3-3.7.6/lib/python3.7/importlib/util.py", line 245, in __getattribute__ self.__spec__.loader.exec_module(self) File "", line 728, in exec_module File "", line 219, in _call_with_frames_removed File "/nix/store/q7s856v6nw4dffdrm9k3w38qs35i8kr3-mercurial-5.2.2/lib/python3.7/site-packages/mercurial/dispatch.py", line 10, in import difflib File "/nix/store/bffdy7q3wi3qinflnvbdkigqj39zzynd-python3-3.7.6/lib/python3.7/difflib.py", line 1084, in import re File "/nix/store/bffdy7q3wi3qinflnvbdkigqj39zzynd-python3-3.7.6/lib/python3.7/re.py", line 143, in class RegexFlag(enum.IntFlag): AttributeError: module 'enum' has no attribute 'IntFlag' (cherry picked from commit 4009d4295e1e3c0e59e716b0b111f48c590a6020) --- tests/Makefile.am | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/Makefile.am b/tests/Makefile.am index e56e528e..d3153a55 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -14,6 +14,7 @@ TESTS_ENVIRONMENT = \ NIX_BUILD_HOOK= \ PGHOST=/tmp \ PERL5LIB="$(srcdir):$(abs_top_srcdir)/src/lib:$$PERL5LIB" \ + PYTHONPATH= \ PATH=$(abs_top_srcdir)/src/hydra-evaluator:$(abs_top_srcdir)/src/script:$(abs_top_srcdir)/src/hydra-eval-jobs:$(abs_top_srcdir)/src/hydra-queue-runner:$$PATH \ perl -w
State:[% IF jobset.enabled == 0; "Disabled"; ELSIF jobset.enabled == 1; "Enabled"; ELSIF jobset.enabled == 2; "One-shot"; END %][% IF jobset.enabled == 0; "Disabled"; ELSIF jobset.enabled == 1; "Enabled"; ELSIF jobset.enabled == 2; "One-shot"; ELSIF jobset.enabled == 3; "One-at-a-time"; END %]
Description:[% IF showProject; INCLUDE renderFullJobsetName project=j.get_column('project') jobset=j.name inRow=1; ELSE; INCLUDE renderJobsetName project=j.get_column('project') jobset=j.name inRow=1; END %] [% HTML.escape(j.description) %] [% IF j.lastcheckedtime; - INCLUDE renderDateTime timestamp = j.lastcheckedtime; - IF j.errormsg || j.fetcherrormsg; %] Error[% END; - ELSE; "-"; - END %]