# hydra/release.nix
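#
# Typical invocations (a sketch; assumes <nixpkgs> points at a Nixpkgs tree):
#
#   nix-build release.nix -A build.x86_64-linux
#   nix-build release.nix -A tests.install.x86_64-linux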

{ hydraSrc ? { outPath = ./.; revCount = 1234; rev = "abcdef"; }
, officialRelease ? false
, shell ? false
}:

with import <nixpkgs/lib>;
let

  pkgs = import <nixpkgs> {};

  genAttrs' = genAttrs [ "x86_64-linux" /* "i686-linux" */ ];
  hydraServer = hydraPkg:
    { config, pkgs, ... }:
    { imports = [ ./hydra-module.nix ];

      virtualisation.memorySize = 1024;
      virtualisation.writableStore = true;

      services.hydra-dev.enable = true;
      services.hydra-dev.package = hydraPkg;
      services.hydra-dev.hydraURL = "http://hydra.example.org";
      services.hydra-dev.notificationSender = "admin@hydra.example.org";

      services.postgresql.enable = true;
      services.postgresql.package = pkgs.postgresql92;

      environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ];
    };

  version = builtins.readFile ./version + "." + toString hydraSrc.revCount + "." + hydraSrc.rev;

in

rec {

  build = genAttrs' (system:

    with import <nixpkgs> { inherit system; };

    let
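
      # aws-sdk-cpp pinned to a commit of edolstra's fork and built with only
      # the S3 API; the pinned Nix below links against it for S3 support.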
      aws-sdk-cpp' =
        lib.overrideDerivation (aws-sdk-cpp.override {
          apis = ["s3"];
          customMemoryManagement = false;
        }) (attrs: {
          src = fetchFromGitHub {
            owner = "edolstra";
            repo = "aws-sdk-cpp";
            rev = "d1e2479f79c24e2a1df8a3f3ef3278a1c6383b1e";
            sha256 = "1vhgsxkhpai9a7dk38q4r239l6dsz2jvl8hii24c194lsga3g84h";
          };
        });
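
      # Nix pinned to a pre-release Git commit; building from a bare checkout
      # needs the autotools bootstrap and the extra build inputs below.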
      nix = overrideDerivation nixUnstable (attrs: {
        src = fetchFromGitHub {
          owner = "NixOS";
          repo = "nix";
          rev = "d2c58ba60572e4248bd52f82fac57d6e0c79773d";
          sha256 = "195f23xcndzrzg3n4wk0884qa6k4gm9mq1pkhzd7rn30vkn564dc";
        };
        buildInputs = attrs.buildInputs ++ [ autoreconfHook bison flex ];
        nativeBuildInputs = attrs.nativeBuildInputs ++ [ aws-sdk-cpp' autoconf-archive ];
        configureFlags = attrs.configureFlags + " --disable-doc-gen";
        preConfigure = "./bootstrap.sh; mkdir -p $doc $man";
      });
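
      # All the Perl modules Hydra needs (plus nix, git, boehmgc and the
      # pinned aws-sdk-cpp), collected into a single environment so they can
      # be pulled in as one build input.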
      perlDeps = buildEnv {
        name = "hydra-perl-deps";
        paths = with perlPackages;
          [ ModulePluggable
            CatalystActionREST
            CatalystAuthenticationStoreDBIxClass
            CatalystDevel
            CatalystDispatchTypeRegex
            CatalystPluginAccessLog
            CatalystPluginAuthorizationRoles
            CatalystPluginCaptcha
            CatalystPluginSessionStateCookie
            CatalystPluginSessionStoreFastMmap
            CatalystPluginStackTrace
            CatalystPluginUnicodeEncoding
            CatalystTraitForRequestProxyBase
            CatalystViewDownload
            CatalystViewJSON
            CatalystViewTT
            CatalystXScriptServerStarman
            CryptRandPasswd
            DBDPg
            DBDSQLite
            DataDump
            DateTime
            DigestSHA1
            EmailMIME
            EmailSender
            FileSlurp
            IOCompress
            IPCRun
            JSONXS
            LWP
            LWPProtocolHttps
            NetAmazonS3
            NetStatsd
            PadWalker
            Readonly
            SQLSplitStatement
            SetScalar
            Starman
            SysHostnameLong
            TestMore
            TextDiff
            TextTable
            XMLSimple
            nix git boehmgc aws-sdk-cpp'
          ];
      };

    in

    releaseTools.nixBuild {
      name = "hydra-${version}";
      src = if shell then null else hydraSrc;
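
      # With shell = true there is no src, presumably so that nix-shell can
      # provide the build environment without unpacking a source tree.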

      stdenv = overrideCC stdenv gcc6;

      buildInputs =
        [ makeWrapper autoconf automake libtool unzip nukeReferences pkgconfig sqlite libpqxx
          gitAndTools.topGit mercurial darcs subversion bazaar openssl bzip2 libxslt
          guile # optional, for Guile + Guix support
          perlDeps perl nix
          postgresql92 # for running the tests
        ];

      hydraPath = lib.makeBinPath (
        [ libxslt sqlite subversion openssh nix coreutils findutils pixz
          gzip bzip2 lzma gnutar unzip git gitAndTools.topGit mercurial darcs gnused bazaar
        ] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ] );
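
      # Everything in hydraPath is prefixed onto PATH by the wrapProgram
      # calls in postInstall, so the VCS and compression tools above are
      # available to Hydra at run time.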

      postUnpack = optionalString (!shell) ''
        # Clean up when building from a working tree.
        (cd $sourceRoot && (git ls-files -o --directory | xargs -r rm -rfv)) || true
      '';

      configureFlags = [ "--with-docbook-xsl=${docbook_xsl}/xml/xsl/docbook" ];
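
      # Put the in-tree hydra programs and Perl libraries first on
      # PATH/PERL5LIB, presumably so the test suite exercises the freshly
      # built code.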
      preHook = ''
        PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-eval-jobs:$(pwd)/src/hydra-queue-runner:$PATH
        PERL5LIB=$(pwd)/src/lib:$PERL5LIB;
      '';

      preConfigure = "autoreconf -vfi";

      enableParallelBuilding = true;

      preCheck = ''
        patchShebangs .
        export LOGNAME=''${LOGNAME:-foo}
      '';
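
      # Wrap only the scripts in $out/bin: `read -n 4` grabs the first four
      # bytes of each file, and anything matching the ELF magic is skipped.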
      postInstall = ''
        mkdir -p $out/nix-support

        for i in $out/bin/*; do
            read -n 4 chars < $i
            if [[ $chars =~ ELF ]]; then continue; fi
            wrapProgram $i \
                --prefix PERL5LIB ':' $out/libexec/hydra/lib:$PERL5LIB \
                --prefix PATH ':' $out/bin:$hydraPath \
                --set HYDRA_RELEASE ${version} \
                --set HYDRA_HOME $out/libexec/hydra \
                --set NIX_RELEASE ${nix.name or "unknown"}
        done
      ''; # */

      dontStrip = true;

      meta.description = "Build of Hydra on ${system}";
      passthru.perlDeps = perlDeps;
    });

  manual = pkgs.runCommand "hydra-manual-${version}"
    { build = build.x86_64-linux;
    }
    ''
      mkdir -p $out/share
      cp -prvd $build/share/doc $out/share/

      mkdir $out/nix-support
      echo "doc manual $out/share/doc/hydra" >> $out/nix-support/hydra-build-products
    '';
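
  # Writing to $out/nix-support/hydra-build-products above registers the
  # manual as a build product, so Hydra's web interface links to it.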

  tests.install = genAttrs' (system:
    with import <nixpkgs/nixos/lib/testing.nix> { inherit system; };
    simpleTest {
      machine = hydraServer build.${system};
      testScript =
        ''
          $machine->waitForJob("hydra-init");
          $machine->waitForJob("hydra-server");
          $machine->waitForJob("hydra-evaluator");
          $machine->waitForJob("hydra-queue-runner");
          $machine->waitForOpenPort("3000");
          $machine->succeed("curl --fail http://localhost:3000/");
        '';
    });

  tests.api = genAttrs' (system:
    with import <nixpkgs/nixos/lib/testing.nix> { inherit system; };
    simpleTest {
      machine = hydraServer build.${system};
      testScript =
        let dbi = "dbi:Pg:dbname=hydra;user=root;"; in
        ''
          $machine->waitForJob("hydra-init");

          # Create an admin account and some other state.
          $machine->succeed
              ( "su - hydra -c \"hydra-create-user root --email-address 'alice\@example.org' --password foobar --role admin\""
              , "mkdir /run/jobset /tmp/nix"
              , "chmod 755 /run/jobset /tmp/nix"
              , "cp ${./tests/api-test.nix} /run/jobset/default.nix"
              , "chmod 644 /run/jobset/default.nix"
              , "chown -R hydra /run/jobset /tmp/nix"
              );

          $machine->succeed("systemctl stop hydra-evaluator hydra-queue-runner");
          $machine->waitForJob("hydra-server");
          $machine->waitForOpenPort("3000");

          # Run the API tests.
          $machine->mustSucceed("su - hydra -c 'perl ${./tests/api-test.pl}' >&2");
        '';
    });
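
  # S3 backup test, currently disabled: per the commit message that added it,
  # two of its checks fail against fake-s3 (a Net::Amazon::S3 / fake-s3
  # interaction bug), though they reportedly pass against real S3.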
  /*
  tests.s3backup = genAttrs' (system:
    with import <nixpkgs/nixos/lib/testing.nix> { inherit system; };
    let hydra = build.${system}; in
    simpleTest {
      machine =
        { config, pkgs, ... }:
        { services.postgresql.enable = true;
          services.postgresql.package = pkgs.postgresql92;
          environment.systemPackages = [ hydra pkgs.rubyLibs.fakes3 ];
          virtualisation.memorySize = 2047;
          boot.kernelPackages = pkgs.linuxPackages_3_10;
          virtualisation.writableStore = true;
          networking.extraHosts = ''
            127.0.0.1 hydra.s3.amazonaws.com
          '';
        };

      testScript =
        ''
          $machine->waitForJob("postgresql");

          # Initialise the database and the state.
          $machine->succeed
              ( "createdb -O root hydra"
              , "psql hydra -f ${hydra}/libexec/hydra/sql/hydra-postgresql.sql"
              , "mkdir /var/lib/hydra"
              , "mkdir /tmp/jobs"
              , "cp ${./tests/s3-backup-test.pl} /tmp/s3-backup-test.pl"
              , "cp ${./tests/api-test.nix} /tmp/jobs/default.nix"
              );

          # Start fakes3.
          $machine->succeed("fakes3 --root /tmp/s3 --port 80 &>/dev/null &");
          $machine->waitForOpenPort("80");

          $machine->succeed("cd /tmp && LOGNAME=root AWS_ACCESS_KEY_ID=foo AWS_SECRET_ACCESS_KEY=bar HYDRA_DBI='dbi:Pg:dbname=hydra;user=root;' HYDRA_CONFIG=${./tests/s3-backup-test.config} perl -I ${hydra}/libexec/hydra/lib -I ${hydra.perlDeps}/lib/perl5/site_perl ./s3-backup-test.pl >&2");
        '';
    });
  */
}