# Hydra release expression: builds Hydra itself (`build`), its manual
# (`manual`), and the NixOS VM integration tests (`tests.install`,
# `tests.api`, `tests.notifications`; `tests.s3backup` is currently
# disabled).  Evaluate with e.g. `nix-build release.nix -A build.x86_64-linux`.
{ hydraSrc ? builtins.fetchGit ./.
, nixpkgs ? builtins.fetchTarball https://github.com/NixOS/nixpkgs/archive/release-19.09.tar.gz
, officialRelease ? false
, shell ? false
}:

with import (nixpkgs + "/lib");

let

  pkgs = import nixpkgs {};

  # Systems to build/test for.
  genAttrs' = genAttrs [ "x86_64-linux" /* "i686-linux" */ ];

  # NixOS configuration for a VM that runs the given Hydra package;
  # shared by the test machines below.
  hydraServer = hydraPkg:
    { config, pkgs, ... }:
    { imports = [ ./hydra-module.nix ];

      virtualisation.memorySize = 1024;
      virtualisation.writableStore = true;

      services.hydra-dev.enable = true;
      services.hydra-dev.package = hydraPkg;
      services.hydra-dev.hydraURL = "http://hydra.example.org";
      services.hydra-dev.notificationSender = "admin@hydra.example.org";

      services.postgresql.enable = true;
      services.postgresql.package = pkgs.postgresql95;

      environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ];

      # The following is to work around the following error from hydra-server:
      #   [error] Caught exception in engine "Cannot determine local time zone"
      time.timeZone = "UTC";

      nix = {
        # The following is to work around: https://github.com/NixOS/hydra/pull/432
        buildMachines = [
          { hostName = "localhost";
            system = "x86_64-linux";
          }
        ];
        # Without this nix tries to fetch packages from the default
        # cache.nixos.org which is not reachable from this sandboxed NixOS test.
        binaryCaches = [];
      };
    };

  version = builtins.readFile ./version + "." + toString hydraSrc.revCount + "." + hydraSrc.rev;

in

rec {

  build = genAttrs' (system:
    let pkgs = import nixpkgs { inherit system; }; in

    with pkgs;

    let

      nix = pkgs.nixUnstable or pkgs.nix;

      perlDeps = buildEnv {
        name = "hydra-perl-deps";
        paths = with perlPackages;
          [ ModulePluggable
            CatalystActionREST
            CatalystAuthenticationStoreDBIxClass
            CatalystDevel
            CatalystDispatchTypeRegex
            CatalystPluginAccessLog
            CatalystPluginAuthorizationRoles
            CatalystPluginCaptcha
            CatalystPluginSessionStateCookie
            CatalystPluginSessionStoreFastMmap
            CatalystPluginStackTrace
            CatalystPluginUnicodeEncoding
            CatalystTraitForRequestProxyBase
            CatalystViewDownload
            CatalystViewJSON
            CatalystViewTT
            CatalystXScriptServerStarman
            CatalystXRoleApplicator
            CryptRandPasswd
            DBDPg
            DBDSQLite
            DataDump
            DateTime
            DigestSHA1
            EmailMIME
            EmailSender
            FileSlurp
            IOCompress
            IPCRun
            JSON
            JSONAny
            JSONXS
            LWP
            LWPProtocolHttps
            NetAmazonS3
            NetPrometheus
            NetStatsd
            PadWalker
            Readonly
            SQLSplitStatement
            SetScalar
            Starman
            SysHostnameLong
            TermSizeAny
            TestMore
            TextDiff
            TextTable
            XMLSimple
            nix
            nix.perl-bindings
            git
            boehmgc
          ];
      };

    in

    releaseTools.nixBuild {
      name = "hydra-${version}";

      # When used as a dev shell there is no source to unpack.
      src = if shell then null else hydraSrc;

      buildInputs =
        [ makeWrapper autoconf automake libtool unzip nukeReferences pkgconfig sqlite libpqxx
          gitAndTools.topGit mercurial darcs subversion bazaar openssl bzip2 libxslt
          perlDeps perl nix
          postgresql95 # for running the tests
          boost
          (nlohmann_json.override { multipleHeaders = true; })
        ];

      hydraPath = lib.makeBinPath (
        [ sqlite subversion openssh nix coreutils findutils pixz
          gzip bzip2 lzma gnutar unzip git gitAndTools.topGit mercurial darcs gnused bazaar
        ] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ] );

      postUnpack = optionalString (!shell) ''
        # Clean up when building from a working tree.
        (cd $sourceRoot && (git ls-files -o --directory | xargs -r rm -rfv)) || true
      '';

      configureFlags = [ "--with-docbook-xsl=${docbook_xsl}/xml/xsl/docbook" ];

      shellHook = ''
        PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-eval-jobs:$(pwd)/src/hydra-queue-runner:$PATH
        ${lib.optionalString shell "PERL5LIB=$(pwd)/src/lib:$PERL5LIB"}
      '';

      preConfigure = "autoreconf -vfi";

      NIX_LDFLAGS = [ "-lpthread" ];

      enableParallelBuilding = true;

      preCheck = ''
        patchShebangs .
        export LOGNAME=''${LOGNAME:-foo}
      '';

      postInstall = ''
        mkdir -p $out/nix-support

        for i in $out/bin/*; do
            read -n 4 chars < $i
            if [[ $chars =~ ELF ]]; then continue; fi
            wrapProgram $i \
                --prefix PERL5LIB ':' $out/libexec/hydra/lib:$PERL5LIB \
                --prefix PATH ':' $out/bin:$hydraPath \
                --set HYDRA_RELEASE ${version} \
                --set HYDRA_HOME $out/libexec/hydra \
                --set NIX_RELEASE ${nix.name or "unknown"}
        done
      ''; # */

      dontStrip = true;

      meta.description = "Build of Hydra on ${system}";
      passthru.perlDeps = perlDeps;
    });

  manual = pkgs.runCommand "hydra-manual-${version}"
    { build = build.x86_64-linux;
    }
    ''
      mkdir -p $out/share
      cp -prvd $build/share/doc $out/share/

      mkdir $out/nix-support
      echo "doc manual $out/share/doc/hydra" >> $out/nix-support/hydra-build-products
    '';

  tests.install = genAttrs' (system:
    with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; };
    simpleTest {
      machine = hydraServer build.${system};
      testScript =
        ''
          $machine->waitForJob("hydra-init");
          $machine->waitForJob("hydra-server");
          $machine->waitForJob("hydra-evaluator");
          $machine->waitForJob("hydra-queue-runner");
          $machine->waitForOpenPort("3000");
          $machine->succeed("curl --fail http://localhost:3000/");
        '';
    });

  tests.api = genAttrs' (system:
    with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; };
    simpleTest {
      machine = hydraServer build.${system};
      testScript =
        let dbi = "dbi:Pg:dbname=hydra;user=root;"; in
        ''
          $machine->waitForJob("hydra-init");

          # Create an admin account and some other state.
          $machine->succeed
              ( "su - hydra -c \"hydra-create-user root --email-address 'alice\@example.org' --password foobar --role admin\""
              , "mkdir /run/jobset /tmp/nix"
              , "chmod 755 /run/jobset /tmp/nix"
              , "cp ${./tests/api-test.nix} /run/jobset/default.nix"
              , "chmod 644 /run/jobset/default.nix"
              , "chown -R hydra /run/jobset /tmp/nix"
              );

          $machine->succeed("systemctl stop hydra-evaluator hydra-queue-runner");
          $machine->waitForJob("hydra-server");
          $machine->waitForOpenPort("3000");

          # Run the API tests.
          $machine->mustSucceed("su - hydra -c 'perl -I ${build.${system}.perlDeps}/lib/perl5/site_perl ${./tests/api-test.pl}' >&2");
        '';
    });

  tests.notifications = genAttrs' (system:
    with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; };
    simpleTest {
      machine = { pkgs, ... }: {
        imports = [ (hydraServer build.${system}) ];
        services.hydra-dev.extraConfig = ''
          <influxdb>
            url = http://127.0.0.1:8086
            db = hydra
          </influxdb>
        '';
        services.influxdb.enable = true;
      };
      testScript = ''
        $machine->waitForJob("hydra-init");

        # Create an admin account and some other state.
        $machine->succeed
            ( "su - hydra -c \"hydra-create-user root --email-address 'alice\@example.org' --password foobar --role admin\""
            , "mkdir /run/jobset"
            , "chmod 755 /run/jobset"
            , "cp ${./tests/api-test.nix} /run/jobset/default.nix"
            , "chmod 644 /run/jobset/default.nix"
            , "chown -R hydra /run/jobset"
            );

        # Wait until InfluxDB can receive web requests
        $machine->waitForJob("influxdb");
        $machine->waitForOpenPort("8086");

        # Create an InfluxDB database where hydra will write to
        $machine->succeed(
          "curl -XPOST 'http://127.0.0.1:8086/query' \\
          --data-urlencode 'q=CREATE DATABASE hydra'");

        # Wait until hydra-server can receive HTTP requests
        $machine->waitForJob("hydra-server");
        $machine->waitForOpenPort("3000");

        # Setup the project and jobset
        $machine->mustSucceed(
          "su - hydra -c 'perl -I ${build.${system}.perlDeps}/lib/perl5/site_perl ${./tests/setup-notifications-jobset.pl}' >&2");

        # Wait until hydra has build the job and
        # the InfluxDBNotification plugin uploaded its notification to InfluxDB
        $machine->waitUntilSucceeds(
          "curl -s -H 'Accept: application/csv' \\
          -G 'http://127.0.0.1:8086/query?db=hydra' \\
          --data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success");
      '';
    });

  /*
  tests.s3backup = genAttrs' (system:
    with import (nixpkgs + "/nixos/lib/testing.nix") { inherit system; };
    let hydra = build.${system}; in
    simpleTest {
      machine =
        { config, pkgs, ... }:
        { services.postgresql.enable = true;
          services.postgresql.package = pkgs.postgresql95;
          environment.systemPackages = [ hydra pkgs.rubyLibs.fakes3 ];
          virtualisation.memorySize = 2047;
          boot.kernelPackages = pkgs.linuxPackages_3_10;
          virtualisation.writableStore = true;
          networking.extraHosts = ''
            127.0.0.1 hydra.s3.amazonaws.com
          '';
        };

      testScript =
        ''
          $machine->waitForJob("postgresql");

          # Initialise the database and the state.
          $machine->succeed
              ( "createdb -O root hydra"
              , "psql hydra -f ${hydra}/libexec/hydra/sql/hydra-postgresql.sql"
              , "mkdir /var/lib/hydra"
              , "mkdir /tmp/jobs"
              , "cp ${./tests/s3-backup-test.pl} /tmp/s3-backup-test.pl"
              , "cp ${./tests/api-test.nix} /tmp/jobs/default.nix"
              );

          # start fakes3
          $machine->succeed("fakes3 --root /tmp/s3 --port 80 &>/dev/null &");
          $machine->waitForOpenPort("80");

          $machine->succeed("cd /tmp && LOGNAME=root AWS_ACCESS_KEY_ID=foo AWS_SECRET_ACCESS_KEY=bar HYDRA_DBI='dbi:Pg:dbname=hydra;user=root;' HYDRA_CONFIG=${./tests/s3-backup-test.config} perl -I ${hydra}/libexec/hydra/lib -I ${hydra.perlDeps}/lib/perl5/site_perl ./s3-backup-test.pl >&2");
        '';
    });
  */
}