hydra/release.nix

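# Release expression for Hydra: it builds Hydra itself, the manual, and the
# NixOS VM tests. Passing shell = true yields a variant intended for
# development (e.g. from nix-shell): hydraSrc is nulled out and the
# working-tree cleanup step is skipped.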
{ hydraSrc ? { outPath = ./.; revCount = 1234; rev = "abcdef"; }
, officialRelease ? false
, shell ? false
}:

with import <nixpkgs/lib>;

let

  pkgs = import <nixpkgs> {};

  genAttrs' = genAttrs [ "x86_64-linux" /* "i686-linux" */ ];
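
  # NixOS configuration for a VM running a given Hydra package on top of a
  # local PostgreSQL instance; shared by the tests below.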
  hydraServer = hydraPkg:
    { config, pkgs, ... }:
    { imports = [ ./hydra-module.nix ];

      virtualisation.memorySize = 1024;
      virtualisation.writableStore = true;

      services.hydra-dev.enable = true;
      services.hydra-dev.package = hydraPkg;
      services.hydra-dev.hydraURL = "http://hydra.example.org";
      services.hydra-dev.notificationSender = "admin@hydra.example.org";

      services.postgresql.enable = true;
      services.postgresql.package = pkgs.postgresql92;

      environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ];
    };

in

rec {
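
  # Hydra itself, built from this source tree for each supported system.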
  build = genAttrs' (system:

    with import <nixpkgs> { inherit system; };

    let
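
      # Restrict the AWS C++ SDK to just the S3 API; presumably this trims the
      # build down to what Nix's S3 support actually needs.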
      aws-sdk-cpp' =
        aws-sdk-cpp.override {
          apis = ["s3"];
          customMemoryManagement = false;
        };

      stdenv6 = overrideCC stdenv gcc6;

    in
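
    # Build Hydra with GCC 6 and nixUnstable, wrapping every mkDerivation in
    # releaseTools.nixBuild so the derivations behave as release builds.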
    callPackage ./. {
      stdenv = stdenv6 // { mkDerivation = args: releaseTools.nixBuild (args // {
        stdenv = stdenv6;
        postUnpack = optionalString (!shell) ''
          # Clean up when building from a working tree.
          (cd $sourceRoot && (git ls-files -o --directory | xargs -r rm -rfv)) || true
        '';
      }); };

      inherit (gitAndTools) topGit;
      nix = nixUnstable;
      aws-sdk-cpp = aws-sdk-cpp';

      hydraSrc = if shell then null else hydraSrc;
    });
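
  # Copy the manual out of the x86_64-linux build and register it as a Hydra
  # build product.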
  manual = pkgs.runCommand "hydra-manual-${build.x86_64-linux.version}"
    { build = build.x86_64-linux;
    }
    ''
      mkdir -p $out/share
      cp -prvd $build/share/doc $out/share/

      mkdir $out/nix-support
      echo "doc manual $out/share/doc/hydra" >> $out/nix-support/hydra-build-products
    '';
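
  # Smoke test: boot a VM running Hydra and check that all daemons come up
  # and the web interface responds.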
  tests.install = genAttrs' (system:
    with import <nixpkgs/nixos/lib/testing.nix> { inherit system; };
    simpleTest {
      machine = hydraServer build.${system};
      testScript =
        ''
          $machine->waitForJob("hydra-init");
          $machine->waitForJob("hydra-server");
          $machine->waitForJob("hydra-evaluator");
          $machine->waitForJob("hydra-queue-runner");
          $machine->waitForOpenPort("3000");

          $machine->succeed("curl --fail http://localhost:3000/");
        '';
    });
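
  # Exercise the JSON API: set up an admin user and a test jobset source on
  # the machine, then drive the server from tests/api-test.pl.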
  tests.api = genAttrs' (system:
    with import <nixpkgs/nixos/lib/testing.nix> { inherit system; };
    simpleTest {
      machine = hydraServer build.${system};
      testScript =
        let dbi = "dbi:Pg:dbname=hydra;user=root;"; in
        ''
          $machine->waitForJob("hydra-init");

          # Create an admin account and some other state.
          $machine->succeed
              ( "su - hydra -c \"hydra-create-user root --email-address 'alice\@example.org' --password foobar --role admin\""
              , "mkdir /run/jobset /tmp/nix"
              , "chmod 755 /run/jobset /tmp/nix"
              , "cp ${./tests/api-test.nix} /run/jobset/default.nix"
              , "chmod 644 /run/jobset/default.nix"
              , "chown -R hydra /run/jobset /tmp/nix"
              );

          $machine->succeed("systemctl stop hydra-evaluator hydra-queue-runner");
          $machine->waitForJob("hydra-server");
          $machine->waitForOpenPort("3000");

          # Run the API tests.
          $machine->mustSucceed("su - hydra -c 'perl ${./tests/api-test.pl}' >&2");
        '';
    });
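
  # S3 backup test, kept commented out: per the commit message that added it,
  # two of its assertions fail against fake-s3 (a Net::Amazon::S3/fake-s3
  # interaction bug) even though the same behaviour works against real S3.
  #
  # The plugin it exercises is configured through <s3config> sections in the
  # Hydra config. A sketch of the options, as described by that commit
  # message (bucket name and jobs regex are illustrative):
  #
  #   <s3config>
  #     name = my-bucket            # bucket name (required)
  #     jobs = project:jobset:.*    # regex on project:jobset:job names (required)
  #     compression_type = bzip2    # bzip2 (default), xz, or none
  #     prefix = cache/             # prepended to all created keys, default ""
  #   </s3config>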
  /*
  tests.s3backup = genAttrs' (system:
    with import <nixpkgs/nixos/lib/testing.nix> { inherit system; };
    let hydra = build.${system}; in
    simpleTest {
      machine =
        { config, pkgs, ... }:
        { services.postgresql.enable = true;
          services.postgresql.package = pkgs.postgresql92;
          environment.systemPackages = [ hydra pkgs.rubyLibs.fakes3 ];
          virtualisation.memorySize = 2047;
          boot.kernelPackages = pkgs.linuxPackages_3_10;
          virtualisation.writableStore = true;
          networking.extraHosts = ''
            127.0.0.1 hydra.s3.amazonaws.com
          '';
        };

      testScript =
        ''
          $machine->waitForJob("postgresql");

          # Initialise the database and the state.
          $machine->succeed
              ( "createdb -O root hydra"
              , "psql hydra -f ${hydra}/libexec/hydra/sql/hydra-postgresql.sql"
              , "mkdir /var/lib/hydra"
              , "mkdir /tmp/jobs"
              , "cp ${./tests/s3-backup-test.pl} /tmp/s3-backup-test.pl"
              , "cp ${./tests/api-test.nix} /tmp/jobs/default.nix"
              );
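
          # networking.extraHosts above maps hydra.s3.amazonaws.com to
          # 127.0.0.1, so the plugin's S3 requests are served by the local
          # fakes3 instance started below.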
          # Start fakes3.
          $machine->succeed("fakes3 --root /tmp/s3 --port 80 &>/dev/null &");
          $machine->waitForOpenPort("80");

          $machine->succeed("cd /tmp && LOGNAME=root AWS_ACCESS_KEY_ID=foo AWS_SECRET_ACCESS_KEY=bar HYDRA_DBI='dbi:Pg:dbname=hydra;user=root;' HYDRA_CONFIG=${./tests/s3-backup-test.config} perl -I ${hydra}/libexec/hydra/lib -I ${hydra.perlDeps}/lib/perl5/site_perl ./s3-backup-test.pl >&2");
        '';
    });
  */
}