Merge branch 'master' into libpqxx_undeprecate

Kevin Quick 2020-04-01 11:54:41 -07:00 committed by GitHub
commit a055796ef5
88 changed files with 1667 additions and 1309 deletions

.gitignore

@@ -15,7 +15,6 @@ Makefile.in
/aclocal.m4
/missing
/install-sh
/src/script/hydra-eval-guile-jobs
/src/sql/hydra-postgresql.sql
/src/sql/hydra-sqlite.sql
/src/sql/tmp.sqlite

README.md

@@ -1,11 +1,11 @@
# Hydra
[Hydra](https://nixos.org/hydra/) is a continuous integration system based
on the Nix package manager. For more information, see the
[manual](http://nixos.org/hydra/manual/).
Hydra is a continuous integration system based on the Nix package
manager. For more information, see the
[manual](https://hydra.nixos.org/job/hydra/master/manual/latest/download-by-type/doc/manual).
For development see
[hacking instructions](http://nixos.org/hydra/manual/#chap-hacking).
[hacking instructions](https://hydra.nixos.org/job/hydra/master/manual/latest/download-by-type/doc/manual#chap-hacking).
---

configure.ac

@@ -1,5 +1,4 @@
AC_INIT([Hydra], [m4_esyscmd([echo -n $(cat ./version)$VERSION_SUFFIX])],
[nix-dev@cs.uu.nl], [hydra], [http://nixos.org/hydra/])
AC_INIT([Hydra], [m4_esyscmd([echo -n $(cat ./version)$VERSION_SUFFIX])])
AC_CONFIG_AUX_DIR(config)
AM_INIT_AUTOMAKE([foreign serial-tests])
@@ -53,15 +52,6 @@ fi
PKG_CHECK_MODULES([NIX], [nix-main nix-expr nix-store])
PKG_CHECK_MODULES([GUILE], [guile-2.0], [HAVE_GUILE=yes], [HAVE_GUILE=no])
if test "x$HAVE_GUILE" = xyes; then
AC_PATH_PROG([GUILE], [guile])
else
GUILE="guile"
fi
AC_SUBST([GUILE])
testPath="$(dirname $(type -p expr))"
AC_SUBST(testPath)
@@ -80,13 +70,11 @@ AC_CONFIG_FILES([
src/lib/Makefile
src/root/Makefile
src/script/Makefile
src/script/hydra-eval-guile-jobs
tests/Makefile
tests/jobs/config.nix
])
AC_CONFIG_COMMANDS([executable-scripts],
[chmod +x src/script/hydra-eval-guile-jobs])
AC_CONFIG_COMMANDS([executable-scripts], [])
AC_CONFIG_HEADER([hydra-config.h])

doc/manual/Makefile.am

@@ -3,14 +3,13 @@ DOCBOOK_FILES = installation.xml introduction.xml manual.xml projects.xml hackin
EXTRA_DIST = $(DOCBOOK_FILES)
xsltproc_opts = \
--param html.stylesheet \'style.css\' \
--param callout.graphics.extension \'.gif\' \
--param section.autolabel 1 \
--param section.label.includes.component.label 1
# Include the manual in the tarball.
dist_html_DATA = manual.html style.css
dist_html_DATA = manual.html
# Embed Docbook's callout images in the distribution.
EXTRA_DIST += images

doc/manual/style.css

@@ -1,256 +0,0 @@
/* Copied from http://bakefile.sourceforge.net/, which appears
licensed under the GNU GPL. */
/***************************************************************************
Basic headers and text:
***************************************************************************/
body
{
font-family: "Nimbus Sans L", sans-serif;
background: white;
margin: 2em 1em 2em 1em;
}
h1, h2, h3, h4
{
color: #005aa0;
}
h1 /* title */
{
font-size: 200%;
}
h2 /* chapters, appendices, subtitle */
{
font-size: 180%;
}
/* Extra space between chapters, appendices. */
div.chapter > div.titlepage h2, div.appendix > div.titlepage h2
{
margin-top: 1.5em;
}
div.section > div.titlepage h2 /* sections */
{
font-size: 150%;
margin-top: 1.5em;
}
h3 /* subsections */
{
font-size: 125%;
}
div.simplesect h2
{
font-size: 110%;
}
div.appendix h3
{
font-size: 150%;
margin-top: 1.5em;
}
div.refnamediv h2, div.refsynopsisdiv h2, div.refsection h2 /* refentry parts */
{
margin-top: 1.4em;
font-size: 125%;
}
div.refsection h3
{
font-size: 110%;
}
/***************************************************************************
Examples:
***************************************************************************/
div.example
{
border: 1px solid #b0b0b0;
padding: 6px 6px;
margin-left: 1.5em;
margin-right: 1.5em;
background: #f4f4f8;
border-radius: 0.4em;
box-shadow: 0.4em 0.4em 0.5em #e0e0e0;
}
div.example p.title
{
margin-top: 0em;
}
div.example pre
{
box-shadow: none;
}
/***************************************************************************
Screen dumps:
***************************************************************************/
pre.screen, pre.programlisting
{
border: 1px solid #b0b0b0;
padding: 3px 3px;
margin-left: 1.5em;
margin-right: 1.5em;
color: #600000;
background: #f4f4f8;
font-family: monospace;
border-radius: 0.4em;
box-shadow: 0.4em 0.4em 0.5em #e0e0e0;
}
div.example pre.programlisting
{
border: 0px;
padding: 0 0;
margin: 0 0 0 0;
}
/***************************************************************************
Notes, warnings etc:
***************************************************************************/
.note, .warning
{
border: 1px solid #b0b0b0;
padding: 3px 3px;
margin-left: 1.5em;
margin-right: 1.5em;
margin-bottom: 1em;
padding: 0.3em 0.3em 0.3em 0.3em;
background: #fffff5;
border-radius: 0.4em;
box-shadow: 0.4em 0.4em 0.5em #e0e0e0;
}
div.note, div.warning
{
font-style: italic;
}
div.note h3, div.warning h3
{
color: red;
font-size: 100%;
padding-right: 0.5em;
display: inline;
}
div.note p, div.warning p
{
margin-bottom: 0em;
}
div.note h3 + p, div.warning h3 + p
{
display: inline;
}
div.note h3
{
color: blue;
font-size: 100%;
}
div.navfooter *
{
font-size: 90%;
}
/***************************************************************************
Links colors and highlighting:
***************************************************************************/
a { text-decoration: none; }
a:hover { text-decoration: underline; }
a:link { color: #0048b3; }
a:visited { color: #002a6a; }
/***************************************************************************
Table of contents:
***************************************************************************/
div.toc
{
font-size: 90%;
}
div.toc dl
{
margin-top: 0em;
margin-bottom: 0em;
}
/***************************************************************************
Special elements:
***************************************************************************/
tt, code
{
color: #400000;
}
.term
{
font-weight: bold;
}
div.variablelist dd p, div.glosslist dd p
{
margin-top: 0em;
}
div.variablelist dd, div.glosslist dd
{
margin-left: 1.5em;
}
div.glosslist dt
{
font-style: italic;
}
.varname
{
color: #400000;
}
span.command strong
{
font-weight: normal;
color: #400000;
}
div.calloutlist table
{
box-shadow: none;
}
table
{
border-collapse: collapse;
box-shadow: 0.4em 0.4em 0.5em #e0e0e0;
}
div.affiliation
{
font-style: italic;
}

release.nix

@@ -1,5 +1,5 @@
{ hydraSrc ? builtins.fetchGit ./.
, nixpkgs ? builtins.fetchGit { url = https://github.com/NixOS/nixpkgs-channels.git; ref = "nixos-19.09-small"; }
, nixpkgs ? builtins.fetchTarball https://github.com/NixOS/nixpkgs/archive/release-19.09.tar.gz
, officialRelease ? false
, shell ? false
}:
@@ -129,11 +129,10 @@ rec {
buildInputs =
[ makeWrapper autoconf automake libtool unzip nukeReferences pkgconfig sqlite libpqxx
gitAndTools.topGit mercurial darcs subversion bazaar openssl bzip2 libxslt
guile # optional, for Guile + Guix support
perlDeps perl nix
postgresql95 # for running the tests
boost
nlohmann_json
(nlohmann_json.override { multipleHeaders = true; })
];
hydraPath = lib.makeBinPath (
@@ -155,9 +154,7 @@ rec {
preConfigure = "autoreconf -vfi";
NIX_LDFLAGS = [
"-lpthread"
];
NIX_LDFLAGS = [ "-lpthread" ];
enableParallelBuilding = true;

src/hydra-eval-jobs/Makefile.am

@@ -1,5 +1,5 @@
bin_PROGRAMS = hydra-eval-jobs
hydra_eval_jobs_SOURCES = hydra-eval-jobs.cc
hydra_eval_jobs_LDADD = $(NIX_LIBS)
hydra_eval_jobs_LDADD = $(NIX_LIBS) -lnixrust
hydra_eval_jobs_CXXFLAGS = $(NIX_CFLAGS) -I ../libhydra

src/hydra-eval-jobs/hydra-eval-jobs.cc

@@ -1,35 +1,63 @@
#include <map>
#include <iostream>
#define GC_LINUX_THREADS 1
#include <gc/gc_allocator.h>
#include "shared.hh"
#include "store-api.hh"
#include "eval.hh"
#include "eval-inline.hh"
#include "util.hh"
#include "json.hh"
#include "get-drvs.hh"
#include "globals.hh"
#include "common-eval-args.hh"
#include "attr-path.hh"
#include "derivations.hh"
#include "hydra-config.hh"
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/resource.h>
#include <nlohmann/json.hpp>
using namespace nix;
static Path gcRootsDir;
static size_t maxMemorySize;
struct MyArgs : MixEvalArgs, MixCommonArgs
{
Path releaseExpr;
bool dryRun = false;
static void findJobs(EvalState & state, JSONObject & top,
Bindings & autoArgs, Value & v, const string & attrPath);
MyArgs() : MixCommonArgs("hydra-eval-jobs")
{
mkFlag()
.longName("help")
.description("show usage information")
.handler([&]() {
printHelp(programName, std::cout);
throw Exit();
});
mkFlag()
.longName("gc-roots-dir")
.description("garbage collector roots directory")
.labels({"path"})
.dest(&gcRootsDir);
static string queryMetaStrings(EvalState & state, DrvInfo & drv, const string & name, const string & subAttribute)
mkFlag()
.longName("dry-run")
.description("don't create store derivations")
.set(&dryRun, true);
expectArg("expr", &releaseExpr);
}
};
static MyArgs myArgs;
static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const string & name, const string & subAttribute)
{
Strings res;
std::function<void(Value & v)> rec;
@@ -54,92 +82,84 @@ static string queryMetaStrings(EvalState & state, DrvInfo & drv, const string &
return concatStringsSep(", ", res);
}
static std::string lastAttrPath;
static bool comma = false;
static size_t maxHeapSize;
struct BailOut { };
bool lte(const std::string & s1, const std::string & s2)
static void worker(
EvalState & state,
Bindings & autoArgs,
AutoCloseFD & to,
AutoCloseFD & from)
{
size_t p1 = 0, p2 = 0;
Value vTop;
state.evalFile(lookupFileArg(state, myArgs.releaseExpr), vTop);
auto vRoot = state.allocValue();
state.autoCallFunction(autoArgs, vTop, *vRoot);
while (true) {
if (p1 == s1.size()) return p2 == s2.size();
if (p2 == s2.size()) return true;
/* Wait for the master to send us a job name. */
writeLine(to.get(), "next");
auto d1 = s1.find('.', p1);
auto d2 = s2.find('.', p2);
auto s = readLine(from.get());
if (s == "exit") break;
if (!hasPrefix(s, "do ")) abort();
std::string attrPath(s, 3);
auto c = s1.compare(p1, d1 - p1, s2, p2, d2 - p2);
debug("worker process %d at '%s'", getpid(), attrPath);
if (c < 0) return true;
if (c > 0) return false;
/* Evaluate it and send info back to the master. */
nlohmann::json reply;
p1 = d1 == std::string::npos ? s1.size() : d1 + 1;
p2 = d2 == std::string::npos ? s2.size() : d2 + 1;
}
}
try {
auto vTmp = findAlongAttrPath(state, attrPath, autoArgs, *vRoot);
auto v = state.allocValue();
state.autoCallFunction(autoArgs, *vTmp, *v);
static void findJobsWrapped(EvalState & state, JSONObject & top,
Bindings & autoArgs, Value & vIn, const string & attrPath)
{
if (lastAttrPath != "" && lte(attrPath, lastAttrPath)) return;
debug(format("at path `%1%'") % attrPath);
checkInterrupt();
Value v;
state.autoCallFunction(autoArgs, vIn, v);
if (v.type == tAttrs) {
auto drv = getDerivation(state, v, false);
if (drv) {
Path drvPath;
if (auto drv = getDerivation(state, *v, false)) {
DrvInfo::Outputs outputs = drv->queryOutputs();
if (drv->querySystem() == "unknown")
throw EvalError("derivation must have a system attribute");
throw EvalError("derivation must have a 'system' attribute");
if (comma) { std::cout << ","; comma = false; }
auto drvPath = drv->queryDrvPath();
{
auto res = top.object(attrPath);
res.attr("nixName", drv->queryName());
res.attr("system", drv->querySystem());
res.attr("drvPath", drvPath = drv->queryDrvPath());
res.attr("description", drv->queryMetaString("description"));
res.attr("license", queryMetaStrings(state, *drv, "license", "shortName"));
res.attr("homepage", drv->queryMetaString("homepage"));
res.attr("maintainers", queryMetaStrings(state, *drv, "maintainers", "email"));
res.attr("schedulingPriority", drv->queryMetaInt("schedulingPriority", 100));
res.attr("timeout", drv->queryMetaInt("timeout", 36000));
res.attr("maxSilent", drv->queryMetaInt("maxSilent", 7200));
res.attr("isChannel", drv->queryMetaBool("isHydraChannel", false));
nlohmann::json job;
job["nixName"] = drv->queryName();
job["system"] =drv->querySystem();
job["drvPath"] = drvPath;
job["description"] = drv->queryMetaString("description");
job["license"] = queryMetaStrings(state, *drv, "license", "shortName");
job["homepage"] = drv->queryMetaString("homepage");
job["maintainers"] = queryMetaStrings(state, *drv, "maintainers", "email");
job["schedulingPriority"] = drv->queryMetaInt("schedulingPriority", 100);
job["timeout"] = drv->queryMetaInt("timeout", 36000);
job["maxSilent"] = drv->queryMetaInt("maxSilent", 7200);
job["isChannel"] = drv->queryMetaBool("isHydraChannel", false);
/* If this is an aggregate, then get its constituents. */
Bindings::iterator a = v.attrs->find(state.symbols.create("_hydraAggregate"));
if (a != v.attrs->end() && state.forceBool(*a->value, *a->pos)) {
Bindings::iterator a = v.attrs->find(state.symbols.create("constituents"));
if (a == v.attrs->end())
auto a = v->attrs->get(state.symbols.create("_hydraAggregate"));
if (a && state.forceBool(*(*a)->value, *(*a)->pos)) {
auto a = v->attrs->get(state.symbols.create("constituents"));
if (!a)
throw EvalError("derivation must have a constituents attribute");
PathSet context;
state.coerceToString(*a->pos, *a->value, context, true, false);
PathSet drvs;
state.coerceToString(*(*a)->pos, *(*a)->value, context, true, false);
for (auto & i : context)
if (i.at(0) == '!') {
size_t index = i.find("!", 1);
drvs.insert(string(i, index + 1));
job["constituents"].push_back(string(i, index + 1));
}
state.forceList(*(*a)->value, *(*a)->pos);
for (unsigned int n = 0; n < (*a)->value->listSize(); ++n) {
auto v = (*a)->value->listElems()[n];
state.forceValue(*v);
if (v->type == tString)
job["namedConstituents"].push_back(state.forceStringNoCtx(*v));
}
res.attr("constituents", concatStringsSep(" ", drvs));
}
/* Register the derivation as a GC root. !!! This
@@ -147,76 +167,61 @@ static void findJobsWrapped(EvalState & state, JSONObject & top,
done. */
auto localStore = state.store.dynamic_pointer_cast<LocalFSStore>();
if (gcRootsDir != "" && localStore) {
Path root = gcRootsDir + "/" + baseNameOf(drvPath);
if (!pathExists(root)) localStore->addPermRoot(drvPath, root, false);
Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath));
if (!pathExists(root))
localStore->addPermRoot(localStore->parseStorePath(drvPath), root, false);
}
auto res2 = res.object("outputs");
nlohmann::json out;
for (auto & j : outputs)
res2.attr(j.first, j.second);
out[j.first] = j.second;
job["outputs"] = std::move(out);
reply["job"] = std::move(job);
}
GC_prof_stats_s gc;
GC_get_prof_stats(&gc, sizeof(gc));
if (gc.heapsize_full > maxHeapSize) {
printInfo("restarting hydra-eval-jobs after job '%s' because heap size is at %d bytes", attrPath, gc.heapsize_full);
lastAttrPath = attrPath;
throw BailOut();
}
}
else {
if (!state.isDerivation(v)) {
for (auto & i : v.attrs->lexicographicOrder()) {
else if (v->type == tAttrs) {
auto attrs = nlohmann::json::array();
StringSet ss;
for (auto & i : v->attrs->lexicographicOrder()) {
std::string name(i->name);
/* Skip jobs with dots in the name. */
if (name.find('.') != std::string::npos) {
if (name.find('.') != std::string::npos || name.find(' ') != std::string::npos) {
printError("skipping job with illegal name '%s'", name);
continue;
}
findJobs(state, top, autoArgs, *i->value,
(attrPath.empty() ? "" : attrPath + ".") + name);
}
}
attrs.push_back(name);
}
reply["attrs"] = std::move(attrs);
}
else if (v.type == tNull) {
// allow null values, meaning 'do nothing'
}
else if (v->type == tNull)
;
else
throw TypeError(format("unsupported value: %1%") % v);
}
else throw TypeError("attribute '%s' is %s, which is not supported", attrPath, showType(*v));
static void findJobs(EvalState & state, JSONObject & top,
Bindings & autoArgs, Value & v, const string & attrPath)
{
try {
findJobsWrapped(state, top, autoArgs, v, attrPath);
} catch (EvalError & e) {
if (comma) { std::cout << ","; comma = false; }
auto res = top.object(attrPath);
res.attr("error", filterANSIEscapes(e.msg(), true));
}
// Transmits the error we got from the previous evaluation
// in the JSON output.
reply["error"] = filterANSIEscapes(e.msg(), true);
// Don't forget to print it into the STDERR log, this is
// what's shown in the Hydra UI.
printError("error: %s", reply["error"]);
}
writeLine(to.get(), reply.dump());
/* If our RSS exceeds the maximum, exit. The master will
start a new process. */
struct rusage r;
getrusage(RUSAGE_SELF, &r);
if ((size_t) r.ru_maxrss > maxMemorySize * 1024) break;
}
writeLine(to.get(), "restart");
}
int main(int argc, char * * argv)
{
assert(lte("abc", "def"));
assert(lte("abc", "def.foo"));
assert(!lte("def", "abc"));
assert(lte("nixpkgs.hello", "nixpkgs"));
assert(lte("nixpkgs.hello", "nixpkgs.hellooo"));
assert(lte("gitAndTools.git-annex.x86_64-darwin", "gitAndTools.git-annex.x86_64-linux"));
assert(lte("gitAndTools.git-annex.x86_64-linux", "gitAndTools.git-annex-remote-b2.aarch64-linux"));
/* Prevent undeclared dependencies in the evaluation via
$NIX_PATH. */
unsetenv("NIX_PATH");
@@ -225,69 +230,14 @@ int main(int argc, char * * argv)
auto config = std::make_unique<::Config>();
auto initialHeapSize = config->getStrOption("evaluator_initial_heap_size", "");
if (initialHeapSize != "")
setenv("GC_INITIAL_HEAP_SIZE", initialHeapSize.c_str(), 1);
maxHeapSize = config->getIntOption("evaluator_max_heap_size", 1UL << 30);
auto nrWorkers = config->getIntOption("evaluator_workers", 1);
maxMemorySize = config->getIntOption("evaluator_max_memory_size", 4096);
initNix();
initGC();
/* Read the current heap size, which is the initial heap size. */
GC_prof_stats_s gc;
GC_get_prof_stats(&gc, sizeof(gc));
auto initialHeapSizeInt = gc.heapsize_full;
/* Then make sure the maximum heap size will be bigger than the initial heap size. */
if (initialHeapSizeInt > maxHeapSize) {
printInfo("warning: evaluator_initial_heap_size (%d) bigger than evaluator_max_heap_size (%d).", initialHeapSizeInt, maxHeapSize);
maxHeapSize = initialHeapSizeInt * 1.1;
printInfo(" evaluator_max_heap_size now set to %d.", maxHeapSize);
}
Path releaseExpr;
struct MyArgs : LegacyArgs, MixEvalArgs
{
using LegacyArgs::LegacyArgs;
};
MyArgs myArgs(baseNameOf(argv[0]), [&](Strings::iterator & arg, const Strings::iterator & end) {
if (*arg == "--gc-roots-dir")
gcRootsDir = getArg(*arg, arg, end);
else if (*arg == "--dry-run")
settings.readOnlyMode = true;
else if (*arg != "" && arg->at(0) == '-')
return false;
else
releaseExpr = *arg;
return true;
});
myArgs.parseCmdline(argvToStrings(argc, argv));
JSONObject json(std::cout, true);
std::cout.flush();
do {
Pipe pipe;
pipe.create();
ProcessOptions options;
options.allowVfork = false;
GC_atfork_prepare();
auto pid = startProcess([&]() {
pipe.readSide = -1;
GC_atfork_child();
GC_start_mark_threads();
if (lastAttrPath != "") debug("resuming from '%s'", lastAttrPath);
/* FIXME: The build hook in conjunction with import-from-derivation is causing "unexpected EOF" during eval */
settings.builders = "";
@@ -295,46 +245,199 @@ int main(int argc, char * * argv)
to the environment. */
evalSettings.restrictEval = true;
if (releaseExpr == "") throw UsageError("no expression specified");
if (myArgs.dryRun) settings.readOnlyMode = true;
if (myArgs.releaseExpr == "") throw UsageError("no expression specified");
if (gcRootsDir == "") printMsg(lvlError, "warning: `--gc-roots-dir' not specified");
EvalState state(myArgs.searchPath, openStore());
struct State
{
std::set<std::string> todo{""};
std::set<std::string> active;
nlohmann::json jobs;
std::exception_ptr exc;
};
Bindings & autoArgs = *myArgs.getAutoArgs(state);
std::condition_variable wakeup;
Value v;
state.evalFile(lookupFileArg(state, releaseExpr), v);
comma = lastAttrPath != "";
Sync<State> state_;
/* Start a handler thread per worker process. */
auto handler = [&]()
{
try {
findJobs(state, json, autoArgs, v, "");
lastAttrPath = "";
} catch (BailOut &) { }
pid_t pid = -1;
AutoCloseFD from, to;
writeFull(pipe.writeSide.get(), lastAttrPath);
while (true) {
exit(0);
}, options);
/* Start a new worker process if necessary. */
if (pid == -1) {
Pipe toPipe, fromPipe;
toPipe.create();
fromPipe.create();
pid = startProcess(
[&,
to{std::make_shared<AutoCloseFD>(std::move(fromPipe.writeSide))},
from{std::make_shared<AutoCloseFD>(std::move(toPipe.readSide))}
]()
{
try {
EvalState state(myArgs.searchPath, openStore());
Bindings & autoArgs = *myArgs.getAutoArgs(state);
worker(state, autoArgs, *to, *from);
} catch (std::exception & e) {
nlohmann::json err;
err["error"] = e.what();
writeLine(to->get(), err.dump());
// Don't forget to print it into the STDERR log, this is
// what's shown in the Hydra UI.
printError("error: %s", err["error"]);
}
},
ProcessOptions { .allowVfork = false });
from = std::move(fromPipe.readSide);
to = std::move(toPipe.writeSide);
debug("created worker process %d", pid);
}
GC_atfork_parent();
/* Check whether the existing worker process is still there. */
auto s = readLine(from.get());
if (s == "restart") {
pid = -1;
continue;
} else if (s != "next") {
auto json = nlohmann::json::parse(s);
throw Error("worker error: %s", (std::string) json["error"]);
}
pipe.writeSide = -1;
/* Wait for a job name to become available. */
std::string attrPath;
int status;
while (true) {
checkInterrupt();
if (waitpid(pid, &status, 0) == pid) break;
if (errno != EINTR) continue;
auto state(state_.lock());
if ((state->todo.empty() && state->active.empty()) || state->exc) {
writeLine(to.get(), "exit");
return;
}
if (!state->todo.empty()) {
attrPath = *state->todo.begin();
state->todo.erase(state->todo.begin());
state->active.insert(attrPath);
break;
} else
state.wait(wakeup);
}
if (status != 0)
throw Exit(WIFEXITED(status) ? WEXITSTATUS(status) : 99);
/* Tell the worker to evaluate it. */
writeLine(to.get(), "do " + attrPath);
maxHeapSize += 64 * 1024 * 1024;
/* Wait for the response. */
auto response = nlohmann::json::parse(readLine(from.get()));
lastAttrPath = drainFD(pipe.readSide.get());
} while (lastAttrPath != "");
/* Handle the response. */
StringSet newAttrs;
if (response.find("job") != response.end()) {
auto state(state_.lock());
state->jobs[attrPath] = response["job"];
}
if (response.find("attrs") != response.end()) {
for (auto & i : response["attrs"]) {
auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) i;
newAttrs.insert(s);
}
}
if (response.find("error") != response.end()) {
auto state(state_.lock());
state->jobs[attrPath]["error"] = response["error"];
}
/* Add newly discovered job names to the queue. */
{
auto state(state_.lock());
state->active.erase(attrPath);
for (auto & s : newAttrs)
state->todo.insert(s);
wakeup.notify_all();
}
}
} catch (...) {
auto state(state_.lock());
state->exc = std::current_exception();
wakeup.notify_all();
}
};
std::vector<std::thread> threads;
for (size_t i = 0; i < nrWorkers; i++)
threads.emplace_back(std::thread(handler));
for (auto & thread : threads)
thread.join();
auto state(state_.lock());
if (state->exc)
std::rethrow_exception(state->exc);
/* For aggregate jobs that have named constituents
(i.e. constituents that are a job name rather than a
derivation), look up the referenced job and add it to the
dependencies of the aggregate derivation. */
auto store = openStore();
for (auto i = state->jobs.begin(); i != state->jobs.end(); ++i) {
auto jobName = i.key();
auto & job = i.value();
auto named = job.find("namedConstituents");
if (named == job.end()) continue;
if (myArgs.dryRun) {
for (std::string jobName2 : *named) {
auto job2 = state->jobs.find(jobName2);
if (job2 == state->jobs.end())
throw Error("aggregate job '%s' references non-existent job '%s'", jobName, jobName2);
std::string drvPath2 = (*job2)["drvPath"];
job["constituents"].push_back(drvPath2);
}
} else {
std::string drvPath = job["drvPath"];
auto drv = readDerivation(*store, drvPath);
for (std::string jobName2 : *named) {
auto job2 = state->jobs.find(jobName2);
if (job2 == state->jobs.end())
throw Error("aggregate job '%s' references non-existent job '%s'", jobName, jobName2);
std::string drvPath2 = (*job2)["drvPath"];
auto drv2 = readDerivation(*store, drvPath2);
job["constituents"].push_back(drvPath2);
drv.inputDrvs[store->parseStorePath(drvPath2)] = {drv2.outputs.begin()->first};
}
std::string drvName(store->parseStorePath(drvPath).name());
assert(hasSuffix(drvName, drvExtension));
drvName.resize(drvName.size() - drvExtension.size());
auto h = hashDerivationModulo(*store, drv, true);
auto outPath = store->makeOutputPath("out", h, drvName);
drv.env["out"] = store->printStorePath(outPath);
drv.outputs.insert_or_assign("out", DerivationOutput(outPath.clone(), "", ""));
auto newDrvPath = store->printStorePath(writeDerivation(store, drv, drvName));
debug("rewrote aggregate derivation %s -> %s", drvPath, newDrvPath);
job["drvPath"] = newDrvPath;
job["outputs"]["out"] = store->printStorePath(outPath);
}
job.erase("namedConstituents");
}
std::cout << state->jobs.dump(2) << "\n";
});
}
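
The rewritten hydra-eval-jobs splits evaluation between a master and forked worker processes that exchange newline-delimited messages: an idle worker sends "next", the master replies "do <attrPath>" or "exit", each evaluation comes back as a single JSON line carrying "job", "attrs" (child attribute names to enqueue), or "error", and "restart" signals that the worker exceeded evaluator_max_memory_size. Below is a minimal, self-contained sketch of the master side of that protocol; the helper name masterLoop and the stream-based transport are illustrative stand-ins for the diff's actual pipe plumbing.

// Hedged sketch of the master side of the worker line protocol above.
#include <iostream>
#include <set>
#include <stdexcept>
#include <string>
#include <nlohmann/json.hpp>

static void masterLoop(std::istream & from, std::ostream & to,
    std::set<std::string> todo, nlohmann::json & jobs)
{
    std::string line;
    while (std::getline(from, line)) {
        if (line == "restart") break;        // worker hit its memory budget
        if (line != "next") throw std::runtime_error("protocol error: " + line);
        if (todo.empty()) { to << "exit\n" << std::flush; return; }
        auto attrPath = *todo.begin();
        todo.erase(todo.begin());
        to << "do " << attrPath << "\n" << std::flush;
        std::string replyLine;
        if (!std::getline(from, replyLine)) break;
        auto reply = nlohmann::json::parse(replyLine);
        if (reply.count("job")) jobs[attrPath] = reply["job"];
        if (reply.count("attrs"))                     // newly discovered names
            for (auto & a : reply["attrs"])
                todo.insert((attrPath.empty() ? "" : attrPath + ".")
                    + a.get<std::string>());
        if (reply.count("error")) jobs[attrPath]["error"] = reply["error"];
    }
}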

src/hydra-evaluator/Makefile.am

@@ -2,4 +2,4 @@ bin_PROGRAMS = hydra-evaluator
hydra_evaluator_SOURCES = hydra-evaluator.cc
hydra_evaluator_LDADD = $(NIX_LIBS) -lpqxx
hydra_evaluator_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra
hydra_evaluator_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations

src/hydra-evaluator/hydra-evaluator.cc

@@ -15,6 +15,13 @@ using namespace nix;
typedef std::pair<std::string, std::string> JobsetName;
enum class EvaluationStyle
{
SCHEDULE = 1,
ONESHOT = 2,
ONE_AT_A_TIME = 3,
};
struct Evaluator
{
std::unique_ptr<Config> config;
@@ -24,6 +31,7 @@ struct Evaluator
struct Jobset
{
JobsetName name;
std::optional<EvaluationStyle> evaluation_style;
time_t lastCheckedTime, triggerTime;
int checkInterval;
Pid pid;
@@ -60,9 +68,10 @@ struct Evaluator
pqxx::work txn(*conn);
auto res = txn.exec
("select project, j.name, lastCheckedTime, triggerTime, checkInterval from Jobsets j join Projects p on j.project = p.name "
("select project, j.name, lastCheckedTime, triggerTime, checkInterval, j.enabled as jobset_enabled from Jobsets j join Projects p on j.project = p.name "
"where j.enabled != 0 and p.enabled != 0");
auto state(state_.lock());
std::set<JobsetName> seen;
@@ -78,6 +87,17 @@ struct Evaluator
jobset.lastCheckedTime = row["lastCheckedTime"].as<time_t>(0);
jobset.triggerTime = row["triggerTime"].as<time_t>(notTriggered);
jobset.checkInterval = row["checkInterval"].as<time_t>();
switch (row["jobset_enabled"].as<int>(0)) {
case 1:
jobset.evaluation_style = EvaluationStyle::SCHEDULE;
break;
case 2:
jobset.evaluation_style = EvaluationStyle::ONESHOT;
break;
case 3:
jobset.evaluation_style = EvaluationStyle::ONE_AT_A_TIME;
break;
}
seen.insert(name);
}
@@ -128,19 +148,100 @@ struct Evaluator
childStarted.notify_one();
}
bool shouldEvaluate(Jobset & jobset)
{
if (jobset.pid != -1) {
// Already running.
debug("shouldEvaluate %s:%s? no: already running",
jobset.name.first, jobset.name.second);
return false;
}
if (jobset.triggerTime != std::numeric_limits<time_t>::max()) {
// An evaluation of this Jobset is requested
debug("shouldEvaluate %s:%s? yes: requested",
jobset.name.first, jobset.name.second);
return true;
}
if (jobset.checkInterval <= 0) {
// Automatic scheduling is disabled. We allow requested
// evaluations, but never schedule one.
debug("shouldEvaluate %s:%s? no: checkInterval <= 0",
jobset.name.first, jobset.name.second);
return false;
}
if (jobset.lastCheckedTime + jobset.checkInterval <= time(0)) {
// Time to schedule a fresh evaluation. If the jobset
// is a ONE_AT_A_TIME jobset, ensure the previous jobset
// has no remaining, unfinished work.
auto conn(dbPool.get());
pqxx::work txn(*conn);
if (jobset.evaluation_style == EvaluationStyle::ONE_AT_A_TIME) {
auto evaluation_res = txn.parameterized
("select id from JobsetEvals "
"where project = $1 and jobset = $2 "
"order by id desc limit 1")
(jobset.name.first)
(jobset.name.second)
.exec();
if (evaluation_res.empty()) {
// First evaluation, so allow scheduling.
debug("shouldEvaluate(one-at-a-time) %s:%s? yes: no prior eval",
jobset.name.first, jobset.name.second);
return true;
}
auto evaluation_id = evaluation_res[0][0].as<int>();
auto unfinished_build_res = txn.parameterized
("select id from Builds "
"join JobsetEvalMembers "
" on (JobsetEvalMembers.build = Builds.id) "
"where JobsetEvalMembers.eval = $1 "
" and builds.finished = 0 "
" limit 1")
(evaluation_id)
.exec();
// If the previous evaluation has no unfinished builds
// schedule!
if (unfinished_build_res.empty()) {
debug("shouldEvaluate(one-at-a-time) %s:%s? yes: no unfinished builds",
jobset.name.first, jobset.name.second);
return true;
} else {
debug("shouldEvaluate(one-at-a-time) %s:%s? no: at least one unfinished build",
jobset.name.first, jobset.name.second);
return false;
}
} else {
// EvaluationStyle::ONESHOT, EvaluationStyle::SCHEDULE
debug("shouldEvaluate(oneshot/scheduled) %s:%s? yes: checkInterval elapsed",
jobset.name.first, jobset.name.second);
return true;
}
}
return false;
}
void startEvals(State & state)
{
std::vector<Jobsets::iterator> sorted;
time_t now = time(0);
/* Filter out jobsets that have been evaluated recently and have
not been triggered. */
for (auto i = state.jobsets.begin(); i != state.jobsets.end(); ++i)
if (evalOne ||
(i->second.pid == -1 &&
(i->second.triggerTime != std::numeric_limits<time_t>::max() ||
(i->second.checkInterval > 0 && i->second.lastCheckedTime + i->second.checkInterval <= now))))
(i->second.evaluation_style && shouldEvaluate(i->second)))
sorted.push_back(i);
/* Put jobsets in order of ascending trigger time, last checked

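For reference, the new evaluation styles map directly onto the Jobsets.enabled column selected above as jobset_enabled: 1 schedules evaluations by checkInterval, 2 is one-shot, and 3 is one-at-a-time, which waits for the previous evaluation's builds to finish. A hedged sketch of that mapping follows; the helper name evaluationStyleOf is illustrative, not part of the diff.

// Sketch of the jobset_enabled -> EvaluationStyle mapping added above.
#include <optional>

enum class EvaluationStyle { SCHEDULE = 1, ONESHOT = 2, ONE_AT_A_TIME = 3 };

static std::optional<EvaluationStyle> evaluationStyleOf(int enabled)
{
    switch (enabled) {
        case 1: return EvaluationStyle::SCHEDULE;      // periodic, per checkInterval
        case 2: return EvaluationStyle::ONESHOT;       // evaluate once on request
        case 3: return EvaluationStyle::ONE_AT_A_TIME; // gate on previous eval's builds
        default: return std::nullopt;                  // unknown value: no style
    }
}
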
src/hydra-queue-runner/Makefile.am

@@ -3,5 +3,5 @@ bin_PROGRAMS = hydra-queue-runner
hydra_queue_runner_SOURCES = hydra-queue-runner.cc queue-monitor.cc dispatcher.cc \
builder.cc build-result.cc build-remote.cc \
build-result.hh counter.hh token-server.hh state.hh db.hh
hydra_queue_runner_LDADD = $(NIX_LIBS) -lpqxx
hydra_queue_runner_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra
hydra_queue_runner_LDADD = $(NIX_LIBS) -lpqxx -lnixrust
hydra_queue_runner_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations

src/hydra-queue-runner/build-remote.cc

@@ -82,10 +82,10 @@ static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Chil
static void copyClosureTo(std::timed_mutex & sendMutex, ref<Store> destStore,
FdSource & from, FdSink & to, const PathSet & paths,
FdSource & from, FdSink & to, const StorePathSet & paths,
bool useSubstitutes = false)
{
PathSet closure;
StorePathSet closure;
for (auto & path : paths)
destStore->computeFSClosure(path, closure);
@@ -94,20 +94,21 @@ static void copyClosureTo(std::timed_mutex & sendMutex, ref<Store> destStore,
garbage-collect paths that are already there. Optionally, ask
the remote host to substitute missing paths. */
// FIXME: substitute output pollutes our build log
to << cmdQueryValidPaths << 1 << useSubstitutes << closure;
to << cmdQueryValidPaths << 1 << useSubstitutes;
writeStorePaths(*destStore, to, closure);
to.flush();
/* Get back the set of paths that are already valid on the remote
host. */
auto present = readStorePaths<PathSet>(*destStore, from);
auto present = readStorePaths<StorePathSet>(*destStore, from);
if (present.size() == closure.size()) return;
Paths sorted = destStore->topoSortPaths(closure);
auto sorted = destStore->topoSortPaths(closure);
Paths missing;
StorePathSet missing;
for (auto i = sorted.rbegin(); i != sorted.rend(); ++i)
if (present.find(*i) == present.end()) missing.push_back(*i);
if (!present.count(*i)) missing.insert(i->clone());
printMsg(lvlDebug, format("sending %1% missing paths") % missing.size());
@@ -131,7 +132,7 @@ void State::buildRemote(ref<Store> destStore,
{
assert(BuildResult::TimedOut == 8);
string base = baseNameOf(step->drvPath);
string base(step->drvPath.to_string());
result.logFile = logDir + "/" + string(base, 0, 2) + "/" + string(base, 2);
AutoDelete autoDelete(result.logFile, false);
@@ -217,22 +218,22 @@ void State::buildRemote(ref<Store> destStore,
outputs of the input derivations. */
updateStep(ssSendingInputs);
PathSet inputs;
BasicDerivation basicDrv(step->drv);
StorePathSet inputs;
BasicDerivation basicDrv(*step->drv);
if (sendDerivation)
inputs.insert(step->drvPath);
inputs.insert(step->drvPath.clone());
else
for (auto & p : step->drv.inputSrcs)
inputs.insert(p);
for (auto & p : step->drv->inputSrcs)
inputs.insert(p.clone());
for (auto & input : step->drv.inputDrvs) {
Derivation drv2 = readDerivation(input.first);
for (auto & input : step->drv->inputDrvs) {
Derivation drv2 = readDerivation(*localStore, localStore->printStorePath(input.first));
for (auto & name : input.second) {
auto i = drv2.outputs.find(name);
if (i == drv2.outputs.end()) continue;
inputs.insert(i->second.path);
basicDrv.inputSrcs.insert(i->second.path);
inputs.insert(i->second.path.clone());
basicDrv.inputSrcs.insert(i->second.path.clone());
}
}
@@ -241,14 +242,15 @@ void State::buildRemote(ref<Store> destStore,
this will copy the inputs to the binary cache from the local
store. */
if (localStore != std::shared_ptr<Store>(destStore))
copyClosure(ref<Store>(localStore), destStore, step->drv.inputSrcs, NoRepair, NoCheckSigs);
copyClosure(ref<Store>(localStore), destStore, step->drv->inputSrcs, NoRepair, NoCheckSigs);
/* Copy the input closure. */
if (!machine->isLocalhost()) {
auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
mc1.reset();
MaintainCount<counter> mc2(nrStepsCopyingTo);
printMsg(lvlDebug, format("sending closure of %1% to %2%") % step->drvPath % machine->sshName);
printMsg(lvlDebug, "sending closure of %s to %s",
localStore->printStorePath(step->drvPath), machine->sshName);
auto now1 = std::chrono::steady_clock::now();
@@ -272,14 +274,19 @@ void State::buildRemote(ref<Store> destStore,
logFD = -1;
/* Do the build. */
printMsg(lvlDebug, format("building %1% on %2%") % step->drvPath % machine->sshName);
printMsg(lvlDebug, "building %s on %s",
localStore->printStorePath(step->drvPath),
machine->sshName);
updateStep(ssBuilding);
if (sendDerivation)
to << cmdBuildPaths << PathSet({step->drvPath});
else
to << cmdBuildDerivation << step->drvPath << basicDrv;
if (sendDerivation) {
to << cmdBuildPaths;
writeStorePaths(*localStore, to, singleton(step->drvPath));
} else {
to << cmdBuildDerivation << localStore->printStorePath(step->drvPath);
writeDerivation(to, *localStore, basicDrv);
}
to << maxSilentTime << buildTimeout;
if (GET_PROTOCOL_MINOR(remoteVersion) >= 2)
to << maxLogSize;
@@ -380,7 +387,8 @@ void State::buildRemote(ref<Store> destStore,
/* If the path was substituted or already valid, then we didn't
get a build log. */
if (result.isCached) {
printMsg(lvlInfo, format("outputs of %1% substituted or already valid on %2%") % step->drvPath % machine->sshName);
printMsg(lvlInfo, "outputs of %s substituted or already valid on %s",
localStore->printStorePath(step->drvPath), machine->sshName);
unlink(result.logFile.c_str());
result.logFile = "";
}
@@ -395,13 +403,12 @@ void State::buildRemote(ref<Store> destStore,
auto now1 = std::chrono::steady_clock::now();
PathSet outputs;
for (auto & output : step->drv.outputs)
outputs.insert(output.second.path);
auto outputs = step->drv->outputPaths();
/* Query the size of the output paths. */
size_t totalNarSize = 0;
to << cmdQueryPathInfos << outputs;
to << cmdQueryPathInfos;
writeStorePaths(*localStore, to, outputs);
to.flush();
while (true) {
if (readString(from) == "") break;
@@ -416,8 +423,8 @@ void State::buildRemote(ref<Store> destStore,
return;
}
printMsg(lvlDebug, format("copying outputs of %s from %s (%d bytes)")
% step->drvPath % machine->sshName % totalNarSize);
printMsg(lvlDebug, "copying outputs of %s from %s (%d bytes)",
localStore->printStorePath(step->drvPath), machine->sshName, totalNarSize);
/* Block until we have the required amount of memory
available, which is twice the NAR size (namely the
@@ -431,10 +438,11 @@ void State::buildRemote(ref<Store> destStore,
auto resMs = std::chrono::duration_cast<std::chrono::milliseconds>(resStop - resStart).count();
if (resMs >= 1000)
printMsg(lvlError, format("warning: had to wait %d ms for %d memory tokens for %s")
% resMs % totalNarSize % step->drvPath);
printMsg(lvlError, "warning: had to wait %d ms for %d memory tokens for %s",
resMs, totalNarSize, localStore->printStorePath(step->drvPath));
to << cmdExportPaths << 0 << outputs;
to << cmdExportPaths << 0;
writeStorePaths(*localStore, to, outputs);
to.flush();
destStore->importPaths(from, result.accessor, NoCheckSigs);

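The build-remote.cc changes port the serve-protocol exchange from string Paths to typed StorePaths: sets of paths are no longer streamed inline with operator<<, but written with writeStorePaths() and read back with readStorePaths<StorePathSet>(). A condensed sketch of the cmdQueryValidPaths round trip follows; missingPaths is a hypothetical helper, the headers are Nix-internal as in the surrounding file, and StorePath::clone() reflects the move-only StorePath of the Nix version this diff targets.

// Hedged sketch of the query-valid-paths round trip shown above.
#include "serialise.hh"        // FdSink, FdSource
#include "serve-protocol.hh"   // cmdQueryValidPaths
#include "store-api.hh"        // Store, StorePathSet
#include "worker-protocol.hh"  // writeStorePaths, readStorePaths

using namespace nix;

static StorePathSet missingPaths(Store & store, FdSink & to, FdSource & from,
    const StorePathSet & closure, bool useSubstitutes)
{
    to << cmdQueryValidPaths << 1 /* lock */ << useSubstitutes;
    writeStorePaths(store, to, closure);   // typed, store-aware serialization
    to.flush();

    /* The remote answers with the subset of the closure it already has. */
    auto present = readStorePaths<StorePathSet>(store, from);

    StorePathSet missing;
    for (auto & path : closure)
        if (!present.count(path))
            missing.insert(path.clone());
    return missing;
}
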
src/hydra-queue-runner/build-result.cc

@@ -14,16 +14,14 @@ BuildOutput getBuildOutput(nix::ref<Store> store,
BuildOutput res;
/* Compute the closure size. */
PathSet outputs;
for (auto & output : drv.outputs)
outputs.insert(output.second.path);
PathSet closure;
auto outputs = drv.outputPaths();
StorePathSet closure;
for (auto & output : outputs)
store->computeFSClosure(output, closure);
store->computeFSClosure(singleton(output), closure);
for (auto & path : closure) {
auto info = store->queryPathInfo(path);
res.closureSize += info->narSize;
if (outputs.find(path) != outputs.end()) res.size += info->narSize;
if (outputs.count(path)) res.size += info->narSize;
}
/* Get build products. */
@@ -39,11 +37,13 @@ BuildOutput getBuildOutput(nix::ref<Store> store,
, std::regex::extended);
for (auto & output : outputs) {
Path failedFile = output + "/nix-support/failed";
auto outputS = store->printStorePath(output);
Path failedFile = outputS + "/nix-support/failed";
if (accessor->stat(failedFile).type == FSAccessor::Type::tRegular)
res.failed = true;
Path productsFile = output + "/nix-support/hydra-build-products";
Path productsFile = outputS + "/nix-support/hydra-build-products";
if (accessor->stat(productsFile).type != FSAccessor::Type::tRegular)
continue;
@@ -72,7 +72,7 @@ BuildOutput getBuildOutput(nix::ref<Store> store,
auto st = accessor->stat(product.path);
if (st.type == FSAccessor::Type::tMissing) continue;
product.name = product.path == output ? "" : baseNameOf(product.path);
product.name = product.path == store->printStorePath(output) ? "" : baseNameOf(product.path);
if (st.type == FSAccessor::Type::tRegular) {
product.isRegular = true;
@@ -91,14 +91,14 @@ BuildOutput getBuildOutput(nix::ref<Store> store,
if (!explicitProducts) {
for (auto & output : drv.outputs) {
BuildProduct product;
product.path = output.second.path;
product.path = store->printStorePath(output.second.path);
product.type = "nix-build";
product.subtype = output.first == "out" ? "" : output.first;
product.name = storePathToName(product.path);
product.name = output.second.path.name();
auto st = accessor->stat(product.path);
if (st.type == FSAccessor::Type::tMissing)
throw Error(format("getting status of %1%") % product.path);
throw Error("getting status of %s", product.path);
if (st.type == FSAccessor::Type::tDirectory)
res.products.push_back(product);
}
@@ -106,7 +106,7 @@ BuildOutput getBuildOutput(nix::ref<Store> store,
/* Get the release name from $output/nix-support/hydra-release-name. */
for (auto & output : outputs) {
Path p = output + "/nix-support/hydra-release-name";
auto p = store->printStorePath(output) + "/nix-support/hydra-release-name";
if (accessor->stat(p).type != FSAccessor::Type::tRegular) continue;
try {
res.releaseName = trim(accessor->readFile(p));
@@ -116,7 +116,7 @@ BuildOutput getBuildOutput(nix::ref<Store> store,
/* Get metrics. */
for (auto & output : outputs) {
Path metricsFile = output + "/nix-support/hydra-metrics";
auto metricsFile = store->printStorePath(output) + "/nix-support/hydra-metrics";
if (accessor->stat(metricsFile).type != FSAccessor::Type::tRegular) continue;
for (auto & line : tokenizeString<Strings>(accessor->readFile(metricsFile), "\n")) {
auto fields = tokenizeString<std::vector<std::string>>(line);

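Each output's nix-support files are now addressed via store->printStorePath(output), and the metrics file is tokenized line by line into whitespace-separated fields. Assuming Hydra's "name value [unit]" metric lines, a standard-library-only sketch of that parsing:

// Hedged sketch of parsing $out/nix-support/hydra-metrics.
#include <map>
#include <sstream>
#include <string>

struct Metric { double value = 0; std::string unit; };

static std::map<std::string, Metric> parseMetrics(const std::string & contents)
{
    std::map<std::string, Metric> metrics;
    std::istringstream lines(contents);
    std::string line;
    while (std::getline(lines, line)) {
        std::istringstream fields(line);
        std::string name, value, unit;             // unit is optional
        if (!(fields >> name >> value)) continue;  // skip malformed lines
        fields >> unit;
        metrics[name] = Metric{std::stod(value), unit};
    }
    return metrics;
}
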
src/hydra-queue-runner/builder.cc

@@ -18,7 +18,7 @@ void setThreadName(const std::string & name)
void State::builder(MachineReservation::ptr reservation)
{
setThreadName("bld~" + baseNameOf(reservation->step->drvPath));
setThreadName("bld~" + std::string(reservation->step->drvPath.to_string()));
StepResult res = sRetry;
@@ -39,8 +39,10 @@ void State::builder(MachineReservation::ptr reservation)
auto destStore = getDestStore();
res = doBuildStep(destStore, reservation, activeStep);
} catch (std::exception & e) {
printMsg(lvlError, format("uncaught exception building %1% on %2%: %3%")
% reservation->step->drvPath % reservation->machine->sshName % e.what());
printMsg(lvlError, "uncaught exception building %s on %s: %s",
localStore->printStorePath(reservation->step->drvPath),
reservation->machine->sshName,
e.what());
}
}
@@ -60,7 +62,7 @@ void State::builder(MachineReservation::ptr reservation)
nrRetries++;
if (step_->tries > maxNrRetries) maxNrRetries = step_->tries; // yeah yeah, not atomic
int delta = retryInterval * std::pow(retryBackoff, step_->tries - 1) + (rand() % 10);
printMsg(lvlInfo, format("will retry %1% after %2%s") % step->drvPath % delta);
printMsg(lvlInfo, "will retry %s after %ss", localStore->printStorePath(step->drvPath), delta);
step_->after = std::chrono::system_clock::now() + std::chrono::seconds(delta);
}
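
The retry delay above grows geometrically with the number of attempts: delta = retryInterval * retryBackoff^(tries - 1), plus up to 9 seconds of jitter. A small worked sketch, assuming illustrative defaults retryInterval = 60 and retryBackoff = 3 (the real values come from the queue runner's configuration):

// Worked example of the geometric retry backoff above.
#include <cmath>
#include <cstdio>
#include <cstdlib>

int main()
{
    const int retryInterval = 60;     // assumed default, in seconds
    const double retryBackoff = 3.0;  // assumed default multiplier
    for (int tries = 1; tries <= 4; tries++) {
        int delta = retryInterval * std::pow(retryBackoff, tries - 1) + (rand() % 10);
        std::printf("try %d -> retry in ~%d s\n", tries, delta); // ~60, 180, 540, 1620
    }
}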
@@ -95,7 +97,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
cancelled (namely if there are no more Builds referring to
it). */
BuildID buildId;
Path buildDrvPath;
std::optional<StorePath> buildDrvPath;
unsigned int maxSilentTime, buildTimeout;
unsigned int repeats = step->isDeterministic ? 1 : 0;
@@ -116,7 +118,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
possibility, we retry this step (putting it back in
the runnable queue). If there are really no strong
pointers to the step, it will be deleted. */
printMsg(lvlInfo, format("maybe cancelling build step %1%") % step->drvPath);
printMsg(lvlInfo, "maybe cancelling build step %s", localStore->printStorePath(step->drvPath));
return sMaybeCancelled;
}
@@ -138,15 +140,15 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
if (!build) build = *dependents.begin();
buildId = build->id;
buildDrvPath = build->drvPath;
buildDrvPath = build->drvPath.clone();
maxSilentTime = build->maxSilentTime;
buildTimeout = build->buildTimeout;
printInfo("performing step %s %d times on %s (needed by build %d and %d others)",
step->drvPath, repeats + 1, machine->sshName, buildId, (dependents.size() - 1));
localStore->printStorePath(step->drvPath), repeats + 1, machine->sshName, buildId, (dependents.size() - 1));
}
bool quit = buildId == buildOne && step->drvPath == buildDrvPath;
bool quit = buildId == buildOne && step->drvPath == *buildDrvPath;
RemoteResult result;
BuildOutput res;
@@ -166,7 +168,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
try {
auto store = destStore.dynamic_pointer_cast<BinaryCacheStore>();
if (uploadLogsToBinaryCache && store && pathExists(result.logFile)) {
store->upsertFile("log/" + baseNameOf(step->drvPath), readFile(result.logFile), "text/plain; charset=utf-8");
store->upsertFile("log/" + std::string(step->drvPath.to_string()), readFile(result.logFile), "text/plain; charset=utf-8");
unlink(result.logFile.c_str());
}
} catch (...) {
@@ -218,7 +220,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
if (result.stepStatus == bsSuccess) {
updateStep(ssPostProcessing);
res = getBuildOutput(destStore, ref<FSAccessor>(result.accessor), step->drv);
res = getBuildOutput(destStore, ref<FSAccessor>(result.accessor), *step->drv);
}
result.accessor = 0;
@@ -255,8 +257,8 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
/* The step had a hopefully temporary failure (e.g. network
issue). Retry a number of times. */
if (result.canRetry) {
printMsg(lvlError, format("possibly transient failure building %1% on %2%: %3%")
% step->drvPath % machine->sshName % result.errorMsg);
printMsg(lvlError, "possibly transient failure building %s on %s: %s",
localStore->printStorePath(step->drvPath), machine->sshName, result.errorMsg);
assert(stepNr);
bool retry;
{
@@ -275,7 +277,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
assert(stepNr);
for (auto & path : step->drv.outputPaths())
for (auto & path : step->drv->outputPaths())
addRoot(path);
/* Register success in the database for all Build objects that
@@ -308,7 +310,8 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
no new referrers can have been added in the
meantime or be added afterwards. */
if (direct.empty()) {
printMsg(lvlDebug, format("finishing build step %1%") % step->drvPath);
printMsg(lvlDebug, "finishing build step %s",
localStore->printStorePath(step->drvPath));
steps_->erase(step->drvPath);
}
}
@@ -373,8 +376,32 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
}
}
} else {
} else
failStep(*conn, step, buildId, result, machine, stepFinished, quit);
// FIXME: keep stats about aborted steps?
nrStepsDone++;
totalStepTime += stepStopTime - stepStartTime;
totalStepBuildTime += result.stopTime - result.startTime;
machine->state->nrStepsDone++;
machine->state->totalStepTime += stepStopTime - stepStartTime;
machine->state->totalStepBuildTime += result.stopTime - result.startTime;
if (quit) exit(0); // testing hack; FIXME: this won't run plugins
return sDone;
}
void State::failStep(
Connection & conn,
Step::ptr step,
BuildID buildId,
const RemoteResult & result,
Machine::ptr machine,
bool & stepFinished,
bool & quit)
{
/* Register failure in the database for all Build objects that
directly or indirectly depend on this step. */
@@ -393,7 +420,8 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
be certain no new referrers can be added. */
if (indirect.empty()) {
for (auto & s : steps) {
printMsg(lvlDebug, format("finishing build step %1%") % s->drvPath);
printMsg(lvlDebug, "finishing build step %s",
localStore->printStorePath(s->drvPath));
steps_->erase(s->drvPath);
}
}
@@ -405,29 +433,30 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
{
auto mc = startDbUpdate();
pqxx::work txn(*conn);
pqxx::work txn(conn);
/* Create failed build steps for every build that
depends on this, except when this step is cached
and is the top-level of that build (since then it's
redundant with the build's isCachedBuild field). */
for (auto & build2 : indirect) {
if ((result.stepStatus == bsCachedFailure && build2->drvPath == step->drvPath) ||
(result.stepStatus != bsCachedFailure && buildId == build2->id) ||
build2->finishedInDB)
for (auto & build : indirect) {
if ((result.stepStatus == bsCachedFailure && build->drvPath == step->drvPath) ||
((result.stepStatus != bsCachedFailure && result.stepStatus != bsUnsupported) && buildId == build->id) ||
build->finishedInDB)
continue;
createBuildStep(txn, 0, build2->id, step, machine->sshName,
result.stepStatus, result.errorMsg, buildId == build2->id ? 0 : buildId);
createBuildStep(txn,
0, build->id, step, machine ? machine->sshName : "",
result.stepStatus, result.errorMsg, buildId == build->id ? 0 : buildId);
}
/* Mark all builds that depend on this derivation as failed. */
for (auto & build2 : indirect) {
if (build2->finishedInDB) continue;
printMsg(lvlError, format("marking build %1% as failed") % build2->id);
for (auto & build : indirect) {
if (build->finishedInDB) continue;
printMsg(lvlError, format("marking build %1% as failed") % build->id);
txn.exec_params0
("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, isCachedBuild = $5, notificationPendingSince = $4 where id = $1 and finished = 0",
build2->id,
(int) (build2->drvPath != step->drvPath && result.buildStatus() == bsFailed ? bsDepFailed : result.buildStatus()),
build->id,
(int) (build->drvPath != step->drvPath && result.buildStatus() == bsFailed ? bsDepFailed : result.buildStatus()),
result.startTime,
result.stopTime,
result.stepStatus == bsCachedFailure ? 1 : 0);
@@ -438,7 +467,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
won't be built again. */
if (result.stepStatus != bsCachedFailure && result.canCache)
for (auto & path : step->drv.outputPaths())
txn.exec_params0("insert into FailedPaths values ($1)", path);
txn.exec_params0("insert into FailedPaths values ($1)", localStore->printStorePath(path));
txn.commit();
}
@@ -458,28 +487,15 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
/* Send notification about this build and its dependents. */
{
pqxx::work txn(*conn);
pqxx::work txn(conn);
notifyBuildFinished(txn, buildId, dependentIDs);
txn.commit();
}
}
// FIXME: keep stats about aborted steps?
nrStepsDone++;
totalStepTime += stepStopTime - stepStartTime;
totalStepBuildTime += result.stopTime - result.startTime;
machine->state->nrStepsDone++;
machine->state->totalStepTime += stepStopTime - stepStartTime;
machine->state->totalStepBuildTime += result.stopTime - result.startTime;
if (quit) exit(0); // testing hack; FIXME: this won't run plugins
return sDone;
}
void State::addRoot(const Path & storePath)
void State::addRoot(const StorePath & storePath)
{
auto root = rootsDir + "/" + baseNameOf(storePath);
auto root = rootsDir + "/" + std::string(storePath.to_string());
if (!pathExists(root)) writeFile(root, "");
}

src/hydra-queue-runner/dispatcher.cc

@@ -10,7 +10,7 @@ using namespace nix;
void State::makeRunnable(Step::ptr step)
{
printMsg(lvlChatty, format("step %1% is now runnable") % step->drvPath);
printMsg(lvlChatty, "step %s is now runnable", localStore->printStorePath(step->drvPath));
{
auto step_(step->state.lock());
@@ -248,7 +248,7 @@ system_time State::doDispatch()
/* Can this machine do this step? */
if (!mi.machine->supportsStep(step)) {
debug("machine '%s' does not support step '%s' (system type '%s')",
mi.machine->sshName, step->drvPath, step->drv.platform);
mi.machine->sshName, localStore->printStorePath(step->drvPath), step->drv->platform);
continue;
}
@@ -300,6 +300,8 @@ system_time State::doDispatch()
} while (keepGoing);
abortUnsupported();
return sleepUntil;
}
@@ -314,6 +316,96 @@ void State::wakeDispatcher()
}
void State::abortUnsupported()
{
/* Make a copy of 'runnable' and 'machines' so we don't block them
very long. */
auto runnable2 = *runnable.lock();
auto machines2 = *machines.lock();
system_time now = std::chrono::system_clock::now();
auto now2 = time(0);
std::unordered_set<Step::ptr> aborted;
size_t count = 0;
for (auto & wstep : runnable2) {
auto step(wstep.lock());
if (!step) continue;
bool supported = false;
for (auto & machine : machines2) {
if (machine.second->supportsStep(step)) {
step->state.lock()->lastSupported = now;
supported = true;
break;
}
}
if (!supported)
count++;
if (!supported
&& std::chrono::duration_cast<std::chrono::seconds>(now - step->state.lock()->lastSupported).count() >= maxUnsupportedTime)
{
printError("aborting unsupported build step '%s' (type '%s')",
localStore->printStorePath(step->drvPath),
step->systemType);
aborted.insert(step);
auto conn(dbPool.get());
std::set<Build::ptr> dependents;
std::set<Step::ptr> steps;
getDependents(step, dependents, steps);
/* Maybe the step got cancelled. */
if (dependents.empty()) continue;
/* Find the build that has this step as the top-level (if
any). */
Build::ptr build;
for (auto build2 : dependents) {
if (build2->drvPath == step->drvPath)
build = build2;
}
if (!build) build = *dependents.begin();
bool stepFinished = false;
bool quit = false;
failStep(
*conn, step, build->id,
RemoteResult {
.stepStatus = bsUnsupported,
.errorMsg = fmt("unsupported system type '%s'",
step->systemType),
.startTime = now2,
.stopTime = now2,
},
nullptr, stepFinished, quit);
if (quit) exit(1);
}
}
/* Clean up 'runnable'. */
{
auto runnable_(runnable.lock());
for (auto i = runnable_->begin(); i != runnable_->end(); ) {
if (aborted.count(i->lock()))
i = runnable_->erase(i);
else
++i;
}
}
nrUnsupportedSteps = count;
}
void Jobset::addStep(time_t startTime, time_t duration)
{
auto steps_(steps.lock());

src/hydra-queue-runner/hydra-queue-runner.cc

@@ -39,14 +39,15 @@ static uint64_t getMemSize()
std::string getEnvOrDie(const std::string & key)
{
char * value = getenv(key.c_str());
auto value = getEnv(key);
if (!value) throw Error("environment variable '%s' is not set", key);
return value;
return *value;
}
State::State()
: config(std::make_unique<::Config>())
, maxUnsupportedTime(config->getIntOption("max_unsupported_time", 0))
, dbPool(config->getIntOption("max_db_connections", 128))
, memoryTokens(config->getIntOption("nar_buffer_size", getMemSize() / 2))
, maxOutputSize(config->getIntOption("max_output_size", 2ULL << 30))
@@ -161,7 +162,7 @@ void State::monitorMachinesFile()
{
string defaultMachinesFile = "/etc/nix/machines";
auto machinesFiles = tokenizeString<std::vector<Path>>(
getEnv("NIX_REMOTE_SYSTEMS", pathExists(defaultMachinesFile) ? defaultMachinesFile : ""), ":");
getEnv("NIX_REMOTE_SYSTEMS").value_or(pathExists(defaultMachinesFile) ? defaultMachinesFile : ""), ":");
if (machinesFiles.empty()) {
parseMachines("localhost " +
@@ -219,6 +220,7 @@ void State::monitorMachinesFile()
sleep(30);
} catch (std::exception & e) {
printMsg(lvlError, format("reloading machines file: %1%") % e.what());
sleep(5);
}
}
}
@@ -253,7 +255,7 @@ unsigned int State::createBuildStep(pqxx::work & txn, time_t startTime, BuildID
buildId,
stepNr,
0, // == build
step->drvPath,
localStore->printStorePath(step->drvPath),
status == bsBusy ? 1 : 0,
startTime != 0 ? std::make_optional(startTime) : std::nullopt,
step->drv.platform,
@@ -268,7 +270,7 @@ unsigned int State::createBuildStep(pqxx::work & txn, time_t startTime, BuildID
for (auto & output : step->drv.outputs)
txn.exec_params0
("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
buildId, stepNr, output.first, output.second.path);
buildId, stepNr, output.first, localStore->printStorePath(output.second.path));
if (status == bsBusy)
txn.exec(fmt("notify step_started, '%d\t%d'", buildId, stepNr));
@@ -309,7 +311,7 @@ void State::finishBuildStep(pqxx::work & txn, const RemoteResult & result,
int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
Build::ptr build, const Path & drvPath, const string & outputName, const Path & storePath)
Build::ptr build, const StorePath & drvPath, const string & outputName, const StorePath & storePath)
{
restart:
auto stepNr = allocBuildStep(txn, build->id);
@@ -319,7 +321,7 @@ int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t sto
build->id,
stepNr,
1, // == substitution
drvPath,
(localStore->printStorePath(drvPath)),
0,
0,
startTime,
@@ -329,7 +331,8 @@ int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t sto
txn.exec_params0
("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
build->id, stepNr, outputName, storePath);
build->id, stepNr, outputName,
localStore->printStorePath(storePath));
return stepNr;
}
@@ -450,7 +453,7 @@ bool State::checkCachedFailure(Step::ptr step, Connection & conn)
{
pqxx::work txn(conn);
for (auto & path : step->drv.outputPaths())
if (!txn.exec_params("select 1 from FailedPaths where path = $1", path).empty())
if (!txn.exec_params("select 1 from FailedPaths where path = $1", localStore->printStorePath(path)).empty())
return true;
return false;
}
@@ -486,7 +489,7 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock()
}
void State::dumpStatus(Connection & conn, bool log)
void State::dumpStatus(Connection & conn)
{
std::ostringstream out;
@@ -518,6 +521,7 @@ void State::dumpStatus(Connection & conn, bool log)
root.attr("nrStepsCopyingTo", nrStepsCopyingTo);
root.attr("nrStepsCopyingFrom", nrStepsCopyingFrom);
root.attr("nrStepsWaiting", nrStepsWaiting);
root.attr("nrUnsupportedSteps", nrUnsupportedSteps);
root.attr("bytesSent", bytesSent);
root.attr("bytesReceived", bytesReceived);
root.attr("nrBuildsRead", nrBuildsRead);
@@ -666,11 +670,6 @@ void State::dumpStatus(Connection & conn, bool log)
}
}
if (log && time(0) >= lastStatusLogged + statusLogInterval) {
printMsg(lvlInfo, format("status: %1%") % out.str());
lastStatusLogged = time(0);
}
{
auto mc = startDbUpdate();
pqxx::work txn(conn);
@@ -762,7 +761,7 @@ void State::run(BuildID buildOne)
Store::Params localParams;
localParams["max-connections"] = "16";
localParams["max-connection-age"] = "600";
localStore = openStore(getEnv("NIX_REMOTE"), localParams);
localStore = openStore(getEnv("NIX_REMOTE").value_or(""), localParams);
auto storeUri = config->getStrOption("store_uri");
_destStore = storeUri == "" ? localStore : openStore(storeUri);
@@ -779,7 +778,7 @@ void State::run(BuildID buildOne)
{
auto conn(dbPool.get());
clearBusy(*conn, 0);
dumpStatus(*conn, false);
dumpStatus(*conn);
}
std::thread(&State::monitorMachinesFile, this).detach();
@@ -842,8 +841,8 @@ void State::run(BuildID buildOne)
auto conn(dbPool.get());
receiver dumpStatus_(*conn, "dump_status");
while (true) {
conn->await_notification(statusLogInterval / 2 + 1, 0);
dumpStatus(*conn, true);
conn->await_notification();
dumpStatus(*conn);
}
} catch (std::exception & e) {
printMsg(lvlError, format("main thread: %1%") % e.what());

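With the periodic logging path removed, dumpStatus now runs only when a dump_status notification arrives on the channel the receiver above subscribes to. A minimal hedged sketch of such a LISTEN/NOTIFY wait loop with libpqxx 6 follows; the connection string and the stand-in for dumpStatus are assumptions.

// Hedged sketch of a notification-driven status dump via libpqxx.
#include <iostream>
#include <pqxx/pqxx>

struct DumpStatusReceiver : pqxx::notification_receiver
{
    bool triggered = false;
    DumpStatusReceiver(pqxx::connection & c)
        : pqxx::notification_receiver(c, "dump_status") {}
    void operator()(const std::string & /* payload */, int /* backendPid */) override
    {
        triggered = true;
    }
};

int main()
{
    pqxx::connection conn("dbname=hydra");   // assumed connection string
    DumpStatusReceiver recv(conn);
    while (true) {
        conn.await_notification();           // blocks until NOTIFY dump_status
        if (recv.triggered) {
            recv.triggered = false;
            std::cout << "dumping status...\n";  // stand-in for dumpStatus(conn)
        }
    }
}
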
src/hydra-queue-runner/queue-monitor.cc

@@ -83,7 +83,7 @@ bool State::getQueuedBuilds(Connection & conn,
them yet (since we don't want a long-running transaction). */
std::vector<BuildID> newIDs;
std::map<BuildID, Build::ptr> newBuildsByID;
std::multimap<Path, BuildID> newBuildsByPath;
std::multimap<StorePath, BuildID> newBuildsByPath;
unsigned int newLastBuildId = lastBuildId;
@ -102,9 +102,9 @@ bool State::getQueuedBuilds(Connection & conn,
if (id > newLastBuildId) newLastBuildId = id;
if (builds_->count(id)) continue;
auto build = std::make_shared<Build>();
auto build = std::make_shared<Build>(
localStore->parseStorePath(row["drvPath"].as<string>()));
build->id = id;
build->drvPath = row["drvPath"].as<string>();
build->projectName = row["project"].as<string>();
build->jobsetName = row["jobset"].as<string>();
build->jobName = row["job"].as<string>();
@ -117,14 +117,14 @@ bool State::getQueuedBuilds(Connection & conn,
newIDs.push_back(id);
newBuildsByID[id] = build;
newBuildsByPath.emplace(std::make_pair(build->drvPath, id));
newBuildsByPath.emplace(std::make_pair(build->drvPath.clone(), id));
}
}
std::set<Step::ptr> newRunnable;
unsigned int nrAdded;
std::function<void(Build::ptr)> createBuild;
std::set<Path> finishedDrvs;
std::set<StorePath> finishedDrvs;
createBuild = [&](Build::ptr build) {
printMsg(lvlTalkative, format("loading build %1% (%2%)") % build->id % build->fullJobName());
@ -160,7 +160,8 @@ bool State::getQueuedBuilds(Connection & conn,
/* Some step previously failed, so mark the build as
failed right away. */
printMsg(lvlError, format("marking build %d as cached failure due to %s") % build->id % ex.step->drvPath);
printMsg(lvlError, "marking build %d as cached failure due to %s",
build->id, localStore->printStorePath(ex.step->drvPath));
if (!build->finishedInDB) {
auto mc = startDbUpdate();
pqxx::work txn(conn);
@ -171,14 +172,14 @@ bool State::getQueuedBuilds(Connection & conn,
auto res = txn.exec_params1
("select max(build) from BuildSteps where drvPath = $1 and startTime != 0 and stopTime != 0 and status = 1",
ex.step->drvPath);
localStore->printStorePath(ex.step->drvPath));
if (!res[0].is_null()) propagatedFrom = res[0].as<BuildID>();
if (!propagatedFrom) {
for (auto & output : ex.step->drv.outputs) {
auto res = txn.exec_params
("select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where path = $1 and startTime != 0 and stopTime != 0 and status = 1",
output.second.path);
localStore->printStorePath(output.second.path));
if (!res[0][0].is_null()) {
propagatedFrom = res[0][0].as<BuildID>();
break;
@ -217,7 +218,7 @@ bool State::getQueuedBuilds(Connection & conn,
/* If we didn't get a step, it means the step's outputs are
all valid. So we mark this as a finished, cached build. */
if (!step) {
Derivation drv = readDerivation(build->drvPath);
Derivation drv = readDerivation(*localStore, localStore->printStorePath(build->drvPath));
BuildOutput res = getBuildOutputCached(conn, destStore, drv);
for (auto & path : drv.outputPaths())
@ -227,7 +228,7 @@ bool State::getQueuedBuilds(Connection & conn,
auto mc = startDbUpdate();
pqxx::work txn(conn);
time_t now = time(0);
printMsg(lvlInfo, format("marking build %1% as succeeded (cached)") % build->id);
printMsg(lvlInfo, "marking build %1% as succeeded (cached)", build->id);
markSucceededBuild(txn, build, res, true, now, now);
notifyBuildFinished(txn, build->id, {});
txn.commit();
@ -250,8 +251,8 @@ bool State::getQueuedBuilds(Connection & conn,
build->propagatePriorities();
printMsg(lvlChatty, format("added build %1% (top-level step %2%, %3% new steps)")
% build->id % step->drvPath % newSteps.size());
printMsg(lvlChatty, "added build %1% (top-level step %2%, %3% new steps)",
build->id, localStore->printStorePath(step->drvPath), newSteps.size());
};
/* Now instantiate build steps for each new build. The builder
@ -271,7 +272,7 @@ bool State::getQueuedBuilds(Connection & conn,
try {
createBuild(build);
} catch (Error & e) {
e.addPrefix(format("while loading build %1%: ") % build->id);
e.addPrefix(fmt("while loading build %1%: ", build->id));
throw;
}
@ -358,10 +359,12 @@ void State::processQueueChange(Connection & conn)
activeStepState->cancelled = true;
if (activeStepState->pid != -1) {
printInfo("killing builder process %d of build step %s",
activeStepState->pid, activeStep->step->drvPath);
activeStepState->pid,
localStore->printStorePath(activeStep->step->drvPath));
if (kill(activeStepState->pid, SIGINT) == -1)
printError("error killing build step %s: %s",
activeStep->step->drvPath, strerror(errno));
localStore->printStorePath(activeStep->step->drvPath),
strerror(errno));
}
}
}
@ -370,8 +373,8 @@ void State::processQueueChange(Connection & conn)
Step::ptr State::createStep(ref<Store> destStore,
Connection & conn, Build::ptr build, const Path & drvPath,
Build::ptr referringBuild, Step::ptr referringStep, std::set<Path> & finishedDrvs,
Connection & conn, Build::ptr build, const StorePath & drvPath,
Build::ptr referringBuild, Step::ptr referringStep, std::set<StorePath> & finishedDrvs,
std::set<Step::ptr> & newSteps, std::set<Step::ptr> & newRunnable)
{
if (finishedDrvs.find(drvPath) != finishedDrvs.end()) return 0;
@ -399,8 +402,7 @@ Step::ptr State::createStep(ref<Store> destStore,
/* If it doesn't exist, create it. */
if (!step) {
step = std::make_shared<Step>();
step->drvPath = drvPath;
step = std::make_shared<Step>(drvPath.clone());
isNew = true;
}
@ -414,28 +416,28 @@ Step::ptr State::createStep(ref<Store> destStore,
if (referringStep)
step_->rdeps.push_back(referringStep);
(*steps_)[drvPath] = step;
steps_->insert_or_assign(drvPath.clone(), step);
}
if (!isNew) return step;
printMsg(lvlDebug, format("considering derivation %1%") % drvPath);
printMsg(lvlDebug, "considering derivation %1%", localStore->printStorePath(drvPath));
/* Initialize the step. Note that the step may be visible in
steps before this point, but that doesn't matter because
it's not runnable yet, and other threads won't make it
runnable while step->created == false. */
step->drv = readDerivation(drvPath);
step->parsedDrv = std::make_unique<ParsedDerivation>(drvPath, step->drv);
step->drv = std::make_unique<Derivation>(readDerivation(*localStore, localStore->printStorePath(drvPath)));
step->parsedDrv = std::make_unique<ParsedDerivation>(drvPath.clone(), *step->drv);
step->preferLocalBuild = step->parsedDrv->willBuildLocally();
step->isDeterministic = get(step->drv.env, "isDetermistic", "0") == "1";
step->isDeterministic = get(step->drv->env, "isDetermistic").value_or("0") == "1";
step->systemType = step->drv.platform;
step->systemType = step->drv->platform;
{
auto i = step->drv.env.find("requiredSystemFeatures");
auto i = step->drv->env.find("requiredSystemFeatures");
StringSet features;
if (i != step->drv.env.end())
if (i != step->drv->env.end())
features = step->requiredSystemFeatures = tokenizeString<std::set<std::string>>(i->second);
if (step->preferLocalBuild)
features.insert("local");
@ -451,12 +453,13 @@ Step::ptr State::createStep(ref<Store> destStore,
/* Are all outputs valid? */
bool valid = true;
PathSet outputs = step->drv.outputPaths();
auto outputs = step->drv->outputPaths();
DerivationOutputs missing;
for (auto & i : step->drv.outputs)
for (auto & i : step->drv->outputs)
if (!destStore->isValidPath(i.second.path)) {
valid = false;
missing[i.first] = i.second;
missing.insert_or_assign(i.first,
DerivationOutput(i.second.path.clone(), std::string(i.second.hashAlgo), std::string(i.second.hash)));
}
/* Try to copy the missing paths from the local store or from
@ -469,7 +472,7 @@ Step::ptr State::createStep(ref<Store> destStore,
avail++;
else if (useSubstitutes) {
SubstitutablePathInfos infos;
localStore->querySubstitutablePathInfos({i.second.path}, infos);
localStore->querySubstitutablePathInfos(singleton(i.second.path), infos);
if (infos.size() == 1)
avail++;
}
@ -482,14 +485,18 @@ Step::ptr State::createStep(ref<Store> destStore,
time_t startTime = time(0);
if (localStore->isValidPath(i.second.path))
printInfo("copying output %1% of %2% from local store", i.second.path, drvPath);
printInfo("copying output %1% of %2% from local store",
localStore->printStorePath(i.second.path),
localStore->printStorePath(drvPath));
else {
printInfo("substituting output %1% of %2%", i.second.path, drvPath);
printInfo("substituting output %1% of %2%",
localStore->printStorePath(i.second.path),
localStore->printStorePath(drvPath));
localStore->ensurePath(i.second.path);
// FIXME: should copy directly from substituter to destStore.
}
copyClosure(ref<Store>(localStore), destStore, {i.second.path});
copyClosure(ref<Store>(localStore), destStore, singleton(i.second.path));
time_t stopTime = time(0);
@ -501,7 +508,10 @@ Step::ptr State::createStep(ref<Store> destStore,
}
} catch (Error & e) {
printError("while copying/substituting output %s of %s: %s", i.second.path, drvPath, e.what());
printError("while copying/substituting output %s of %s: %s",
localStore->printStorePath(i.second.path),
localStore->printStorePath(drvPath),
e.what());
valid = false;
break;
}
@ -511,15 +521,15 @@ Step::ptr State::createStep(ref<Store> destStore,
// FIXME: check whether all outputs are in the binary cache.
if (valid) {
finishedDrvs.insert(drvPath);
finishedDrvs.insert(drvPath.clone());
return 0;
}
/* No, we need to build. */
printMsg(lvlDebug, format("creating build step %1%") % drvPath);
printMsg(lvlDebug, "creating build step %1%", localStore->printStorePath(drvPath));
/* Create steps for the dependencies. */
for (auto & i : step->drv.inputDrvs) {
for (auto & i : step->drv->inputDrvs) {
auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable);
if (dep) {
auto step_(step->state.lock());
@ -610,7 +620,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
("select id, buildStatus, releaseName, closureSize, size from Builds b "
"join BuildOutputs o on b.id = o.build "
"where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1",
output.second.path);
localStore->printStorePath(output.second.path));
if (r.empty()) continue;
BuildID id = r[0][0].as<BuildID>();
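
The singleton(...) calls earlier in this file build one-element path sets. Since StorePath is move-only in this API, a helper along these lines is assumed (a hedged reconstruction, not necessarily the real definition):

    #include <set>

    // Assumed shape of the singleton helper: build a one-element set from a
    // non-copyable value by cloning it. StorePath exposes clone() here.
    template<typename T>
    std::set<T> singleton(const T & x)
    {
        std::set<T> s;
        s.insert(x.clone());
        return s;
    }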

View file

@ -68,7 +68,7 @@ struct RemoteResult
std::unique_ptr<nix::TokenServer::Token> tokens;
std::shared_ptr<nix::FSAccessor> accessor;
BuildStatus buildStatus()
BuildStatus buildStatus() const
{
return stepStatus == bsCachedFailure ? bsFailed : stepStatus;
}
@ -123,8 +123,8 @@ struct Build
typedef std::weak_ptr<Build> wptr;
BuildID id;
nix::Path drvPath;
std::map<std::string, nix::Path> outputs;
nix::StorePath drvPath;
std::map<std::string, nix::StorePath> outputs;
std::string projectName, jobsetName, jobName;
time_t timestamp;
unsigned int maxSilentTime, buildTimeout;
@ -136,6 +136,9 @@ struct Build
std::atomic_bool finishedInDB{false};
Build(nix::StorePath && drvPath) : drvPath(std::move(drvPath))
{ }
std::string fullJobName()
{
return projectName + ":" + jobsetName + ":" + jobName;
@ -150,8 +153,8 @@ struct Step
typedef std::shared_ptr<Step> ptr;
typedef std::weak_ptr<Step> wptr;
nix::Path drvPath;
nix::Derivation drv;
nix::StorePath drvPath;
std::unique_ptr<nix::Derivation> drv;
std::unique_ptr<nix::ParsedDerivation> parsedDrv;
std::set<std::string> requiredSystemFeatures;
bool preferLocalBuild;
@ -195,12 +198,19 @@ struct Step
/* The time at which this step became runnable. */
system_time runnableSince;
/* The time that we last saw a machine that supports this
step. */
system_time lastSupported = std::chrono::system_clock::now();
};
std::atomic_bool finished{false}; // debugging
nix::Sync<State> state;
Step(nix::StorePath && drvPath) : drvPath(std::move(drvPath))
{ }
~Step()
{
//printMsg(lvlError, format("destroying step %1%") % drvPath);
@ -252,7 +262,7 @@ struct Machine
{
/* Check that this machine is of the type required by the
step. */
if (!systemTypes.count(step->drv.platform == "builtin" ? nix::settings.thisSystem : step->drv.platform))
if (!systemTypes.count(step->drv->platform == "builtin" ? nix::settings.thisSystem : step->drv->platform))
return false;
/* Check that the step requires all mandatory features of this
@ -297,6 +307,9 @@ private:
const float retryBackoff = 3.0;
const unsigned int maxParallelCopyClosure = 4;
/* Time in seconds before unsupported build steps are aborted. */
const unsigned int maxUnsupportedTime = 0;
nix::Path hydraData, logDir;
bool useSubstitutes = false;
@ -313,7 +326,7 @@ private:
queued builds). Note that these are weak pointers. Steps are
kept alive by being reachable from Builds or by being in
progress. */
typedef std::map<nix::Path, Step::wptr> Steps;
typedef std::map<nix::StorePath, Step::wptr> Steps;
nix::Sync<Steps> steps;
/* Build steps that have no unbuilt dependencies. */
@ -342,6 +355,7 @@ private:
counter nrStepsCopyingTo{0};
counter nrStepsCopyingFrom{0};
counter nrStepsWaiting{0};
counter nrUnsupportedSteps{0};
counter nrRetries{0};
counter maxNrRetries{0};
counter totalStepTime{0}; // total time for steps, including closure copying
@ -406,9 +420,6 @@ private:
size_t maxOutputSize;
size_t maxLogSize;
time_t lastStatusLogged = 0;
const int statusLogInterval = 300;
/* Steps that were busy while we encountered a PostgreSQL
error. These need to be cleared at a later time to prevent them
from showing up as busy until the queue runner is restarted. */
@ -454,7 +465,7 @@ private:
const std::string & machine);
int createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
Build::ptr build, const nix::Path & drvPath, const std::string & outputName, const nix::Path & storePath);
Build::ptr build, const nix::StorePath & drvPath, const std::string & outputName, const nix::StorePath & storePath);
void updateBuild(pqxx::work & txn, Build::ptr build, BuildStatus status);
@ -473,10 +484,19 @@ private:
const nix::Derivation & drv);
Step::ptr createStep(nix::ref<nix::Store> store,
Connection & conn, Build::ptr build, const nix::Path & drvPath,
Build::ptr referringBuild, Step::ptr referringStep, std::set<nix::Path> & finishedDrvs,
Connection & conn, Build::ptr build, const nix::StorePath & drvPath,
Build::ptr referringBuild, Step::ptr referringStep, std::set<nix::StorePath> & finishedDrvs,
std::set<Step::ptr> & newSteps, std::set<Step::ptr> & newRunnable);
void failStep(
Connection & conn,
Step::ptr step,
BuildID buildId,
const RemoteResult & result,
Machine::ptr machine,
bool & stepFinished,
bool & quit);
Jobset::ptr createJobset(pqxx::work & txn,
const std::string & projectName, const std::string & jobsetName);
@ -491,6 +511,8 @@ private:
void wakeDispatcher();
void abortUnsupported();
void builder(MachineReservation::ptr reservation);
/* Perform the given build step. Return true if the step is to be
@ -521,9 +543,9 @@ private:
has it. */
std::shared_ptr<nix::PathLocks> acquireGlobalLock();
void dumpStatus(Connection & conn, bool log);
void dumpStatus(Connection & conn);
void addRoot(const nix::Path & storePath);
void addRoot(const nix::StorePath & storePath);
public:

View file

@ -7,7 +7,7 @@
namespace nix {
MakeError(NoTokens, Error)
MakeError(NoTokens, Error);
/* This class hands out tokens. There are only maxTokens tokens
available. Calling get(N) will return a Token object, representing

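The comment above describes a counting-semaphore style token server. As a reading aid, a hedged sketch of that idea (illustrative only; Hydra's real class differs in detail):

    #include <condition_variable>
    #include <mutex>

    // Counting semaphore whose Token returns its share on destruction (RAII).
    class TokenServer
    {
        unsigned int available;
        std::mutex m;
        std::condition_variable cv;

    public:
        explicit TokenServer(unsigned int maxTokens) : available(maxTokens) { }

        class Token
        {
            TokenServer & ts;
            unsigned int n;
        public:
            Token(TokenServer & ts, unsigned int n) : ts(ts), n(n) { }
            Token(const Token &) = delete; // copying would double-release
            ~Token()
            {
                std::lock_guard<std::mutex> lock(ts.m);
                ts.available += n;
                ts.cv.notify_all();
            }
        };

        // get(N) blocks until N tokens are free, then hands them out.
        Token get(unsigned int n = 1)
        {
            std::unique_lock<std::mutex> lock(m);
            cv.wait(lock, [&] { return available >= n; });
            available -= n;
            return Token(*this, n); // C++17 guaranteed elision
        }
    };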
View file

@ -88,7 +88,7 @@ sub jobsetToHash {
triggertime => $jobset->triggertime,
fetcherrormsg => $jobset->fetcherrormsg,
errortime => $jobset->errortime,
haserrormsg => $jobset->errormsg eq "" ? JSON::false : JSON::true
haserrormsg => defined($jobset->errormsg) && $jobset->errormsg ne "" ? JSON::true : JSON::false
};
}

View file

@ -193,7 +193,8 @@ sub checkPath {
sub serveFile {
my ($c, $path) = @_;
my $res = run(cmd => ["nix", "ls-store", "--store", getStoreUri(), "--json", "$path"]);
my $res = run(cmd => ["nix", "--experimental-features", "nix-command",
"ls-store", "--store", getStoreUri(), "--json", "$path"]);
if ($res->{status}) {
notFound($c, "File '$path' does not exist.") if $res->{stderr} =~ /does not exist/;
@ -217,7 +218,8 @@ sub serveFile {
elsif ($ls->{type} eq "regular") {
$c->stash->{'plain'} = { data => grab(cmd => ["nix", "cat-store", "--store", getStoreUri(), "$path"]) };
$c->stash->{'plain'} = { data => grab(cmd => ["nix", "--experimental-features", "nix-command",
"cat-store", "--store", getStoreUri(), "$path"]) };
# Detect MIME type. Borrowed from Catalyst::Plugin::Static::Simple.
my $type = "text/plain";

View file

@ -172,7 +172,7 @@ sub get_builds : Chained('job') PathPart('') CaptureArgs(0) {
my ($self, $c) = @_;
$c->stash->{allBuilds} = $c->stash->{job}->builds;
$c->stash->{latestSucceeded} = $c->model('DB')->resultset('LatestSucceededForJob')
->search({}, {bind => [$c->stash->{project}->name, $c->stash->{jobset}->name, $c->stash->{job}->name]});
->search({}, {bind => [$c->stash->{jobset}->name, $c->stash->{job}->name]});
$c->stash->{channelBaseName} =
$c->stash->{project}->name . "-" . $c->stash->{jobset}->name . "-" . $c->stash->{job}->name;
}

View file

@ -162,7 +162,7 @@ sub get_builds : Chained('jobsetChain') PathPart('') CaptureArgs(0) {
my ($self, $c) = @_;
$c->stash->{allBuilds} = $c->stash->{jobset}->builds;
$c->stash->{latestSucceeded} = $c->model('DB')->resultset('LatestSucceededForJobset')
->search({}, {bind => [$c->stash->{project}->name, $c->stash->{jobset}->name]});
->search({}, {bind => [$c->stash->{jobset}->name]});
$c->stash->{channelBaseName} =
$c->stash->{project}->name . "-" . $c->stash->{jobset}->name;
}
@ -223,15 +223,10 @@ sub updateJobset {
error($c, "Cannot rename jobset to $jobsetName since that identifier is already taken.")
if $jobsetName ne $oldName && defined $c->stash->{project}->jobsets->find({ name => $jobsetName });
# When the expression is in a .scm file, assume it's a Guile + Guix
# build expression.
my $exprType =
$c->stash->{params}->{"nixexprpath"} =~ /.scm$/ ? "guile" : "nix";
my ($nixExprPath, $nixExprInput) = nixExprPathFromParams $c;
my $enabled = int($c->stash->{params}->{enabled});
die if $enabled < 0 || $enabled > 2;
die if $enabled < 0 || $enabled > 3;
my $shares = int($c->stash->{params}->{schedulingshares} // 1);
error($c, "The number of scheduling shares must be positive.") if $shares <= 0;

View file

@ -68,8 +68,14 @@ sub handleDeclarativeJobsetBuild {
my $id = $build->id;
die "Declarative jobset build $id failed" unless $build->buildstatus == 0;
my $declPath = ($build->buildoutputs)[0]->path;
my $declText = readNixFile($declPath)
or die "Couldn't read declarative specification file $declPath: $!";
my $declText = eval {
readNixFile($declPath)
};
if ($@) {
print STDERR "ERROR: failed to readNixFile $declPath: ", $@, "\n";
die;
}
my $declSpec = decode_json($declText);
txn_do($db, sub {
my @kept = keys %$declSpec;

View file

@ -509,7 +509,8 @@ sub getStoreUri {
# Read a file from the (possibly remote) nix store
sub readNixFile {
my ($path) = @_;
return grab(cmd => ["nix", "cat-store", "--store", getStoreUri(), "$path"]);
return grab(cmd => ["nix", "--experimental-features", "nix-command",
"cat-store", "--store", getStoreUri(), "$path"]);
}
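
A hedged example call (the store path is a placeholder):

    # Hypothetical: read a declarative jobset spec out of the (remote) store.
    my $json = readNixFile("/nix/store/<hash>-jobsets/spec.json");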

View file

@ -7,6 +7,53 @@ use LWP::UserAgent;
use Hydra::Helper::CatalystUtils;
use JSON;
=head1 NAME
SlackNotification - hydra-notify plugin for sending Slack notifications about
build results
=head1 DESCRIPTION
This plugin reports build statuses to various Slack channels. One can configure
which builds are reported to which channels, and whether reports are sent only
on state changes (regressions and improvements) or for every build.
=head1 CONFIGURATION
The module is configured using the C<slack> block in Hydra's config file. There
can be multiple such blocks in the config file, each configuring different (or
even the same) set of builds and how they report to Slack channels.
The following entries are recognized in the C<slack> block:
=over 4
=item jobs
A pattern for job names. All builds whose job name matches this pattern will
emit a message to the designated Slack channel (see C<url>). The pattern must
match the whole name, so leaving this field empty results in no notifications
being sent. To match all builds, use C<.*>.
=item url
The URL to a L<Slack incoming webhook|https://api.slack.com/messaging/webhooks>.
Slack administrators have to prepare one incoming webhook for each channel. This
URL should be treated as secret, as anyone knowing the URL could post a message
to the Slack workspace (or more precisely, the channel behind it).
=item force
(Optional) An I<integer> indicating whether to report on every build or only
on status changes. If not provided, defaults to 0, meaning reports are sent
only when the build status changes from success to failure or vice versa. Any
other value results in a report for every build.
=back
=cut
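To make this concrete, a hypothetical hydra.conf fragment matching the options above (the job pattern and webhook URL are placeholders):

    <slack>
      jobs = hydra:master:.*
      url = https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXX
      force = 0
    </slack>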
sub isEnabled {
my ($self) = @_;
return defined $self->{config}->{slack};
@ -40,20 +87,32 @@ sub buildFinished {
# we send one aggregate message.
my %channels;
foreach my $b ($build, @{$dependents}) {
my $prevBuild = getPreviousBuild($b);
my $jobName = showJobName $b;
my $buildStatus = $b->buildstatus;
my $cancelledOrAborted = $buildStatus == 4 || $buildStatus == 3;
my $prevBuild = getPreviousBuild($b);
my $sameAsPrevious = defined $prevBuild && ($buildStatus == $prevBuild->buildstatus);
my $prevBuildStatus = (defined $prevBuild) ? $prevBuild->buildstatus : -1;
my $prevBuildId = (defined $prevBuild) ? $prevBuild->id : -1;
print STDERR "SlackNotification_Debug job name $jobName status $buildStatus (previous: $prevBuildStatus from $prevBuildId)\n";
foreach my $channel (@config) {
my $force = $channel->{force};
next unless $jobName =~ /^$channel->{jobs}$/;
# If build is cancelled or aborted, do not send email.
next if ! $force && ($b->buildstatus == 4 || $b->buildstatus == 3);
my $force = $channel->{force};
print STDERR "SlackNotification_Debug found match with '$channel->{jobs}' with force=$force\n";
# If the build is cancelled or aborted, do not send a Slack notification.
next if ! $force && $cancelledOrAborted;
# If there is a previous (that is not cancelled or aborted) build
# with same buildstatus, do not send email.
next if ! $force && defined $prevBuild && ($b->buildstatus == $prevBuild->buildstatus);
# with the same buildstatus, do not send a Slack notification.
next if ! $force && $sameAsPrevious;
print STDERR "SlackNotification_Debug adding $jobName to the report list\n";
$channels{$channel->{url}} //= { channel => $channel, builds => [] };
push @{$channels{$channel->{url}}->{builds}}, $b;
}
@ -93,6 +152,8 @@ sub buildFinished {
$text .= join(" or ", scalar @x > 1 ? join(", ", @x[0..scalar @x - 2]) : (), $x[-1]);
}
print STDERR "SlackNotification_Debug POSTing to url ending with: ${\substr $url, -8}\n";
my $msg =
{ attachments =>
[{ fallback => "Job " . showJobName($build) . " build number " . $build->id . ": " . showStatus($build),

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<AggregateConstituents>
=head1 TABLE: C<aggregateconstituents>
=cut
__PACKAGE__->table("AggregateConstituents");
__PACKAGE__->table("aggregateconstituents");
=head1 ACCESSORS
@ -103,8 +103,8 @@ __PACKAGE__->belongs_to(
);
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-08-15 00:20:01
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:TLNenyPLIWw2gWsOVhplZw
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:bQfQoSstlaFy7zw8i1R+ow
# You can replace this text with custom code or comments, and it will be preserved on regeneration

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<BuildInputs>
=head1 TABLE: C<buildinputs>
=cut
__PACKAGE__->table("BuildInputs");
__PACKAGE__->table("buildinputs");
=head1 ACCESSORS
@ -40,6 +40,7 @@ __PACKAGE__->table("BuildInputs");
data_type: 'integer'
is_auto_increment: 1
is_nullable: 0
sequence: 'buildinputs_id_seq'
=head2 build
@ -98,7 +99,12 @@ __PACKAGE__->table("BuildInputs");
__PACKAGE__->add_columns(
"id",
{ data_type => "integer", is_auto_increment => 1, is_nullable => 0 },
{
data_type => "integer",
is_auto_increment => 1,
is_nullable => 0,
sequence => "buildinputs_id_seq",
},
"build",
{ data_type => "integer", is_foreign_key => 1, is_nullable => 1 },
"name",
@ -176,8 +182,8 @@ __PACKAGE__->belongs_to(
);
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-10-08 13:08:15
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:OaJPzRM+8XGsu3eIkqeYEw
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:/Fwb8emBsvwrZlEab2X+gQ
my %hint = (
columns => [

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<BuildMetrics>
=head1 TABLE: C<buildmetrics>
=cut
__PACKAGE__->table("BuildMetrics");
__PACKAGE__->table("buildmetrics");
=head1 ACCESSORS
@ -177,8 +177,8 @@ __PACKAGE__->belongs_to(
);
# Created by DBIx::Class::Schema::Loader v0.07043 @ 2015-07-30 16:52:20
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:qoPm5/le+sVHigW4Dmum2Q
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Roy7h/K9u7DQOzet4B1sbA
sub json_hint {
return { columns => ['value', 'unit'] };

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<BuildOutputs>
=head1 TABLE: C<buildoutputs>
=cut
__PACKAGE__->table("BuildOutputs");
__PACKAGE__->table("buildoutputs");
=head1 ACCESSORS
@ -94,8 +94,8 @@ __PACKAGE__->belongs_to(
);
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:V8MbzKvZNEaeHBJV67+ZMQ
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:71R9clwAP6vzDh10EukTaw
my %hint = (
columns => [

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<BuildProducts>
=head1 TABLE: C<buildproducts>
=cut
__PACKAGE__->table("BuildProducts");
__PACKAGE__->table("buildproducts");
=head1 ACCESSORS
@ -143,8 +143,8 @@ __PACKAGE__->belongs_to(
);
# Created by DBIx::Class::Schema::Loader v0.07043 @ 2016-04-13 14:49:33
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:kONECZn56f7sqfrLviiUOQ
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:iI0gmKqQxiPBTy5QsM6tpQ
my %hint = (
columns => [

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<BuildStepOutputs>
=head1 TABLE: C<buildstepoutputs>
=cut
__PACKAGE__->table("BuildStepOutputs");
__PACKAGE__->table("buildstepoutputs");
=head1 ACCESSORS
@ -119,8 +119,8 @@ __PACKAGE__->belongs_to(
);
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:A/4v3ugXYbuYoKPlOvC6mg
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Y6DpbTM6z4cOGoYIhD3i1A
# You can replace this text with custom code or comments, and it will be preserved on regeneration

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<BuildSteps>
=head1 TABLE: C<buildsteps>
=cut
__PACKAGE__->table("BuildSteps");
__PACKAGE__->table("buildsteps");
=head1 ACCESSORS
@ -215,8 +215,8 @@ __PACKAGE__->belongs_to(
);
# Created by DBIx::Class::Schema::Loader v0.07045 @ 2016-12-07 13:48:19
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:3FYkqSUfgWmiqZzmX8J4TA
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:AMjHq4g/fSUv/lZuZOljYg
my %hint = (
columns => [

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<Builds>
=head1 TABLE: C<builds>
=cut
__PACKAGE__->table("Builds");
__PACKAGE__->table("builds");
=head1 ACCESSORS
@ -40,6 +40,7 @@ __PACKAGE__->table("Builds");
data_type: 'integer'
is_auto_increment: 1
is_nullable: 0
sequence: 'builds_id_seq'
=head2 finished
@ -63,6 +64,12 @@ __PACKAGE__->table("Builds");
is_foreign_key: 1
is_nullable: 0
=head2 jobset_id
data_type: 'integer'
is_foreign_key: 1
is_nullable: 0
=head2 job
data_type: 'text'
@ -200,7 +207,12 @@ __PACKAGE__->table("Builds");
__PACKAGE__->add_columns(
"id",
{ data_type => "integer", is_auto_increment => 1, is_nullable => 0 },
{
data_type => "integer",
is_auto_increment => 1,
is_nullable => 0,
sequence => "builds_id_seq",
},
"finished",
{ data_type => "integer", is_nullable => 0 },
"timestamp",
@ -209,6 +221,8 @@ __PACKAGE__->add_columns(
{ data_type => "text", is_foreign_key => 1, is_nullable => 0 },
"jobset",
{ data_type => "text", is_foreign_key => 1, is_nullable => 0 },
"jobset_id",
{ data_type => "integer", is_foreign_key => 1, is_nullable => 0 },
"job",
{ data_type => "text", is_foreign_key => 1, is_nullable => 0 },
"nixname",
@ -451,6 +465,21 @@ Related object: L<Hydra::Schema::Jobsets>
__PACKAGE__->belongs_to(
"jobset",
"Hydra::Schema::Jobsets",
{ id => "jobset_id" },
{ is_deferrable => 0, on_delete => "CASCADE", on_update => "NO ACTION" },
);
=head2 jobset_project_jobset
Type: belongs_to
Related object: L<Hydra::Schema::Jobsets>
=cut
__PACKAGE__->belongs_to(
"jobset_project_jobset",
"Hydra::Schema::Jobsets",
{ name => "jobset", project => "project" },
{ is_deferrable => 0, on_delete => "NO ACTION", on_update => "CASCADE" },
);
@ -544,8 +573,8 @@ __PACKAGE__->many_to_many(
);
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2019-08-19 16:12:37
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:VjYbAQwv4THW2VfWQ5ajYQ
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:34:25
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:EEXlcKN/ydXJ129vT0jTUw
__PACKAGE__->has_many(
"dependents",
@ -608,8 +637,8 @@ QUERY
makeQueries('', "");
makeQueries('ForProject', "and project = ?");
makeQueries('ForJobset', "and project = ? and jobset = ?");
makeQueries('ForJob', "and project = ? and jobset = ? and job = ?");
makeQueries('ForJobset', "and jobset_id = (select id from jobsets j where j.name = ?)");
makeQueries('ForJob', "and jobset_id = (select id from jobsets j where j.name = ?) and job = ?");
my %hint = (

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<CachedBazaarInputs>
=head1 TABLE: C<cachedbazaarinputs>
=cut
__PACKAGE__->table("CachedBazaarInputs");
__PACKAGE__->table("cachedbazaarinputs");
=head1 ACCESSORS
@ -83,8 +83,8 @@ __PACKAGE__->add_columns(
__PACKAGE__->set_primary_key("uri", "revision");
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:zvun8uhxwrr7B8EsqBoCjA
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:X8L4C57lMOctdqOKSmfA/g
# You can replace this text with custom content, and it will be preserved on regeneration

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<CachedCVSInputs>
=head1 TABLE: C<cachedcvsinputs>
=cut
__PACKAGE__->table("CachedCVSInputs");
__PACKAGE__->table("cachedcvsinputs");
=head1 ACCESSORS
@ -99,8 +99,8 @@ __PACKAGE__->add_columns(
__PACKAGE__->set_primary_key("uri", "module", "sha256hash");
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Vi1qzjW52Lnsl0JSmGzy0w
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:6eQ+i/th+oVZNRiDPd2luA
# You can replace this text with custom content, and it will be preserved on regeneration
1;

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<CachedDarcsInputs>
=head1 TABLE: C<cacheddarcsinputs>
=cut
__PACKAGE__->table("CachedDarcsInputs");
__PACKAGE__->table("cacheddarcsinputs");
=head1 ACCESSORS
@ -90,8 +90,8 @@ __PACKAGE__->add_columns(
__PACKAGE__->set_primary_key("uri", "revision");
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-09-20 11:08:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Yl1slt3SAizijgu0KUTn0A
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Buwq42sBXQVfYUy01WMyYw
# You can replace this text with custom code or comments, and it will be preserved on regeneration

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<CachedGitInputs>
=head1 TABLE: C<cachedgitinputs>
=cut
__PACKAGE__->table("CachedGitInputs");
__PACKAGE__->table("cachedgitinputs");
=head1 ACCESSORS
@ -92,7 +92,7 @@ __PACKAGE__->add_columns(
__PACKAGE__->set_primary_key("uri", "branch", "revision");
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:I4hI02FKRMkw76WV/KBocA
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:0sdK9uQZpx869oqS5thRLw
1;

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<CachedHgInputs>
=head1 TABLE: C<cachedhginputs>
=cut
__PACKAGE__->table("CachedHgInputs");
__PACKAGE__->table("cachedhginputs");
=head1 ACCESSORS
@ -92,8 +92,8 @@ __PACKAGE__->add_columns(
__PACKAGE__->set_primary_key("uri", "branch", "revision");
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:qS/eiiZXmpc7KpTHdtaT7g
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:dYfjQ0SJG/mBrsZemAW3zw
# You can replace this text with custom content, and it will be preserved on regeneration

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<CachedPathInputs>
=head1 TABLE: C<cachedpathinputs>
=cut
__PACKAGE__->table("CachedPathInputs");
__PACKAGE__->table("cachedpathinputs");
=head1 ACCESSORS
@ -90,7 +90,7 @@ __PACKAGE__->add_columns(
__PACKAGE__->set_primary_key("srcpath", "sha256hash");
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:28rja0vR1glJJ15hzVfjsQ
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:oV7tbWLNEMC8byKf9UnAlw
1;

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<CachedSubversionInputs>
=head1 TABLE: C<cachedsubversioninputs>
=cut
__PACKAGE__->table("CachedSubversionInputs");
__PACKAGE__->table("cachedsubversioninputs");
=head1 ACCESSORS
@ -83,7 +83,7 @@ __PACKAGE__->add_columns(
__PACKAGE__->set_primary_key("uri", "revision");
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:3qXfnvkOVj25W94bfhQ65w
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:VGt/0HG84eNZr9OIA8jzow
1;

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<FailedPaths>
=head1 TABLE: C<failedpaths>
=cut
__PACKAGE__->table("FailedPaths");
__PACKAGE__->table("failedpaths");
=head1 ACCESSORS
@ -57,8 +57,8 @@ __PACKAGE__->add_columns("path", { data_type => "text", is_nullable => 0 });
__PACKAGE__->set_primary_key("path");
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2015-06-10 14:48:16
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:WFgjfjH+szE6Ntcicmaflw
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:jr3XiGO4lWAzqfATbsMwFw
# You can replace this text with custom code or comments, and it will be preserved on regeneration

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<Jobs>
=head1 TABLE: C<jobs>
=cut
__PACKAGE__->table("Jobs");
__PACKAGE__->table("jobs");
=head1 ACCESSORS
@ -47,6 +47,12 @@ __PACKAGE__->table("Jobs");
is_foreign_key: 1
is_nullable: 0
=head2 jobset_id
data_type: 'integer'
is_foreign_key: 1
is_nullable: 0
=head2 name
data_type: 'text'
@ -59,6 +65,8 @@ __PACKAGE__->add_columns(
{ data_type => "text", is_foreign_key => 1, is_nullable => 0 },
"jobset",
{ data_type => "text", is_foreign_key => 1, is_nullable => 0 },
"jobset_id",
{ data_type => "integer", is_foreign_key => 1, is_nullable => 0 },
"name",
{ data_type => "text", is_nullable => 0 },
);
@ -130,6 +138,21 @@ Related object: L<Hydra::Schema::Jobsets>
__PACKAGE__->belongs_to(
"jobset",
"Hydra::Schema::Jobsets",
{ id => "jobset_id" },
{ is_deferrable => 0, on_delete => "CASCADE", on_update => "NO ACTION" },
);
=head2 jobset_project_jobset
Type: belongs_to
Related object: L<Hydra::Schema::Jobsets>
=cut
__PACKAGE__->belongs_to(
"jobset_project_jobset",
"Hydra::Schema::Jobsets",
{ name => "jobset", project => "project" },
{ is_deferrable => 0, on_delete => "CASCADE", on_update => "CASCADE" },
);
@ -169,7 +192,25 @@ __PACKAGE__->has_many(
);
# Created by DBIx::Class::Schema::Loader v0.07043 @ 2015-07-30 16:52:20
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:vDAo9bzLca+QWfhOb9OLMg
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:33:28
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:C5Tyh8Ke4yC6q7KIFVOHcQ
=head2 builds
Type: has_many
Related object: L<Hydra::Schema::Builds>
=cut
__PACKAGE__->has_many(
"builds",
"Hydra::Schema::Builds",
{
"foreign.job" => "self.name",
"foreign.jobset_id" => "self.jobset_id",
},
undef,
);
1;

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<JobsetEvalInputs>
=head1 TABLE: C<jobsetevalinputs>
=cut
__PACKAGE__->table("JobsetEvalInputs");
__PACKAGE__->table("jobsetevalinputs");
=head1 ACCESSORS
@ -166,8 +166,8 @@ __PACKAGE__->belongs_to(
);
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:1Dp8B58leBLh4GK0GPw2zg
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:/cFQGBLhvpmBO1UJztgIAg
my %hint = (
columns => [

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<JobsetEvalMembers>
=head1 TABLE: C<jobsetevalmembers>
=cut
__PACKAGE__->table("JobsetEvalMembers");
__PACKAGE__->table("jobsetevalmembers");
=head1 ACCESSORS
@ -110,8 +110,8 @@ __PACKAGE__->belongs_to(
);
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:ccPNQe/QnSjTAC3uGWe8Ng
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:T+dJFh/sDO8WsasqYVLRSQ
# You can replace this text with custom content, and it will be preserved on regeneration

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<JobsetEvals>
=head1 TABLE: C<jobsetevals>
=cut
__PACKAGE__->table("JobsetEvals");
__PACKAGE__->table("jobsetevals");
=head1 ACCESSORS
@ -40,6 +40,7 @@ __PACKAGE__->table("JobsetEvals");
data_type: 'integer'
is_auto_increment: 1
is_nullable: 0
sequence: 'jobsetevals_id_seq'
=head2 project
@ -88,11 +89,21 @@ __PACKAGE__->table("JobsetEvals");
data_type: 'integer'
is_nullable: 1
=head2 flake
data_type: 'text'
is_nullable: 1
=cut
__PACKAGE__->add_columns(
"id",
{ data_type => "integer", is_auto_increment => 1, is_nullable => 0 },
{
data_type => "integer",
is_auto_increment => 1,
is_nullable => 0,
sequence => "jobsetevals_id_seq",
},
"project",
{ data_type => "text", is_foreign_key => 1, is_nullable => 0 },
"jobset",
@ -111,6 +122,8 @@ __PACKAGE__->add_columns(
{ data_type => "integer", is_nullable => 1 },
"nrsucceeded",
{ data_type => "integer", is_nullable => 1 },
"flake",
{ data_type => "text", is_nullable => 1 },
);
=head1 PRIMARY KEY
@ -188,8 +201,8 @@ __PACKAGE__->belongs_to(
);
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:SlEiF8oN6FBK262uSiMKiw
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-09 15:21:11
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Ar6GRni8AcAQmuZyg6tFKw
__PACKAGE__->has_many(
"buildIds",

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<JobsetInputAlts>
=head1 TABLE: C<jobsetinputalts>
=cut
__PACKAGE__->table("JobsetInputAlts");
__PACKAGE__->table("jobsetinputalts");
=head1 ACCESSORS
@ -121,7 +121,7 @@ __PACKAGE__->belongs_to(
);
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:UUO37lIuEYm0GiR92m/fyA
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:nh8dQDL9FtgzXcwjDufDMQ
1;

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<JobsetInputs>
=head1 TABLE: C<jobsetinputs>
=cut
__PACKAGE__->table("JobsetInputs");
__PACKAGE__->table("jobsetinputs");
=head1 ACCESSORS
@ -130,28 +130,9 @@ __PACKAGE__->has_many(
undef,
);
=head2 jobsets
Type: has_many
Related object: L<Hydra::Schema::Jobsets>
=cut
__PACKAGE__->has_many(
"jobsets",
"Hydra::Schema::Jobsets",
{
"foreign.name" => "self.jobset",
"foreign.nixexprinput" => "self.name",
"foreign.project" => "self.project",
},
undef,
);
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-10-08 13:06:15
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:+mZZqLjQNwblb/EWW1alLQ
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:5uKwEhDXso4IR1TFmwRxiA
my %hint = (
relations => {

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<JobsetRenames>
=head1 TABLE: C<jobsetrenames>
=cut
__PACKAGE__->table("JobsetRenames");
__PACKAGE__->table("jobsetrenames");
=head1 ACCESSORS
@ -110,8 +110,8 @@ __PACKAGE__->belongs_to(
);
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2014-04-23 23:13:51
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:SBpKWF9swFc9T1Uc0VFlgA
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:eOQbJ2O/p0G1317m3IC/KA
# You can replace this text with custom code or comments, and it will be preserved on regeneration

View file

@ -27,20 +27,26 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<Jobsets>
=head1 TABLE: C<jobsets>
=cut
__PACKAGE__->table("Jobsets");
__PACKAGE__->table("jobsets");
=head1 ACCESSORS
=head2 name
data_type: 'text'
is_foreign_key: 1
is_nullable: 0
=head2 id
data_type: 'integer'
is_auto_increment: 1
is_nullable: 0
sequence: 'jobsets_id_seq'
=head2 project
data_type: 'text'
@ -55,13 +61,12 @@ __PACKAGE__->table("Jobsets");
=head2 nixexprinput
data_type: 'text'
is_foreign_key: 1
is_nullable: 0
is_nullable: 1
=head2 nixexprpath
data_type: 'text'
is_nullable: 0
is_nullable: 1
=head2 errormsg
@ -139,19 +144,37 @@ __PACKAGE__->table("Jobsets");
data_type: 'integer'
is_nullable: 1
=head2 type
data_type: 'integer'
default_value: 0
is_nullable: 0
=head2 flake
data_type: 'text'
is_nullable: 1
=cut
__PACKAGE__->add_columns(
"name",
{ data_type => "text", is_foreign_key => 1, is_nullable => 0 },
{ data_type => "text", is_nullable => 0 },
"id",
{
data_type => "integer",
is_auto_increment => 1,
is_nullable => 0,
sequence => "jobsets_id_seq",
},
"project",
{ data_type => "text", is_foreign_key => 1, is_nullable => 0 },
"description",
{ data_type => "text", is_nullable => 1 },
"nixexprinput",
{ data_type => "text", is_foreign_key => 1, is_nullable => 0 },
{ data_type => "text", is_nullable => 1 },
"nixexprpath",
{ data_type => "text", is_nullable => 0 },
{ data_type => "text", is_nullable => 1 },
"errormsg",
{ data_type => "text", is_nullable => 1 },
"errortime",
@ -180,6 +203,10 @@ __PACKAGE__->add_columns(
{ data_type => "boolean", is_nullable => 1 },
"starttime",
{ data_type => "integer", is_nullable => 1 },
"type",
{ data_type => "integer", default_value => 0, is_nullable => 0 },
"flake",
{ data_type => "text", is_nullable => 1 },
);
=head1 PRIMARY KEY
@ -196,6 +223,20 @@ __PACKAGE__->add_columns(
__PACKAGE__->set_primary_key("project", "name");
=head1 UNIQUE CONSTRAINTS
=head2 C<jobsets_id_unique>
=over 4
=item * L</id>
=back
=cut
__PACKAGE__->add_unique_constraint("jobsets_id_unique", ["id"]);
=head1 RELATIONS
=head2 buildmetrics
@ -216,7 +257,7 @@ __PACKAGE__->has_many(
undef,
);
=head2 builds
=head2 builds_jobset_ids
Type: has_many
@ -225,7 +266,22 @@ Related object: L<Hydra::Schema::Builds>
=cut
__PACKAGE__->has_many(
"builds",
"builds_jobset_ids",
"Hydra::Schema::Builds",
{ "foreign.jobset_id" => "self.id" },
undef,
);
=head2 builds_project_jobsets
Type: has_many
Related object: L<Hydra::Schema::Builds>
=cut
__PACKAGE__->has_many(
"builds_project_jobsets",
"Hydra::Schema::Builds",
{
"foreign.jobset" => "self.name",
@ -234,7 +290,7 @@ __PACKAGE__->has_many(
undef,
);
=head2 jobs
=head2 jobs_jobset_ids
Type: has_many
@ -243,7 +299,22 @@ Related object: L<Hydra::Schema::Jobs>
=cut
__PACKAGE__->has_many(
"jobs",
"jobs_jobset_ids",
"Hydra::Schema::Jobs",
{ "foreign.jobset_id" => "self.id" },
undef,
);
=head2 jobs_project_jobsets
Type: has_many
Related object: L<Hydra::Schema::Jobs>
=cut
__PACKAGE__->has_many(
"jobs_project_jobsets",
"Hydra::Schema::Jobs",
{
"foreign.jobset" => "self.name",
@ -270,21 +341,6 @@ __PACKAGE__->has_many(
undef,
);
=head2 jobsetinput
Type: belongs_to
Related object: L<Hydra::Schema::JobsetInputs>
=cut
__PACKAGE__->belongs_to(
"jobsetinput",
"Hydra::Schema::JobsetInputs",
{ jobset => "name", name => "nixexprinput", project => "project" },
{ is_deferrable => 0, on_delete => "NO ACTION", on_update => "NO ACTION" },
);
=head2 jobsetinputs
Type: has_many
@ -352,8 +408,43 @@ __PACKAGE__->has_many(
);
# Created by DBIx::Class::Schema::Loader v0.07045 @ 2017-03-09 13:03:05
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:ivYvsUyhEeaeI4EmRQ0/QQ
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-09 15:32:17
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:P8+t7rgpOqkGwRdM2b+3Bw
=head2 builds
Type: has_many
Related object: L<Hydra::Schema::Builds>
=cut
__PACKAGE__->has_many(
"builds",
"Hydra::Schema::Builds",
{ "foreign.jobset_id" => "self.id" },
undef,
);
=head2 jobs
Type: has_many
Related object: L<Hydra::Schema::Jobs>
=cut
__PACKAGE__->has_many(
"jobs",
"Hydra::Schema::Jobs",
{ "foreign.jobset_id" => "self.id" },
undef,
);
__PACKAGE__->add_column(
"+id" => { retrieve_on_insert => 1 }
);
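
A hedged Perl illustration of what retrieve_on_insert buys here (schema handle and column values are hypothetical):

    # With retrieve_on_insert => 1, the id allocated by jobsets_id_seq is
    # fetched back automatically as part of the insert.
    my $jobset = $db->resultset('Jobsets')->create(
        { project => "hydra", name => "master" });
    print $jobset->id, "\n";   # defined immediately, no explicit re-select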
my %hint = (
columns => [

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<NewsItems>
=head1 TABLE: C<newsitems>
=cut
__PACKAGE__->table("NewsItems");
__PACKAGE__->table("newsitems");
=head1 ACCESSORS
@ -40,6 +40,7 @@ __PACKAGE__->table("NewsItems");
data_type: 'integer'
is_auto_increment: 1
is_nullable: 0
sequence: 'newsitems_id_seq'
=head2 contents
@ -61,7 +62,12 @@ __PACKAGE__->table("NewsItems");
__PACKAGE__->add_columns(
"id",
{ data_type => "integer", is_auto_increment => 1, is_nullable => 0 },
{
data_type => "integer",
is_auto_increment => 1,
is_nullable => 0,
sequence => "newsitems_id_seq",
},
"contents",
{ data_type => "text", is_nullable => 0 },
"createtime",
@ -100,7 +106,7 @@ __PACKAGE__->belongs_to(
);
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:3CRNsvd+YnZp9c80tuZREQ
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:r6vX8VG/+NQraIVKFgHzxQ
1;

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<NrBuilds>
=head1 TABLE: C<nrbuilds>
=cut
__PACKAGE__->table("NrBuilds");
__PACKAGE__->table("nrbuilds");
=head1 ACCESSORS
@ -67,8 +67,8 @@ __PACKAGE__->add_columns(
__PACKAGE__->set_primary_key("what");
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-08-12 17:59:18
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:CK8eJGC803nGj0wnete9xg
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:qv1I8Wu7KXHAs+pyBn2ofA
# You can replace this text with custom code or comments, and it will be preserved on regeneration

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<ProjectMembers>
=head1 TABLE: C<projectmembers>
=cut
__PACKAGE__->table("ProjectMembers");
__PACKAGE__->table("projectmembers");
=head1 ACCESSORS
@ -103,8 +103,8 @@ __PACKAGE__->belongs_to(
);
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:imPoiaitrTbX0vVNlF6dPA
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:e/hYmoNmcEUoGhRqtwdyQw
# You can replace this text with custom content, and it will be preserved on regeneration

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<Projects>
=head1 TABLE: C<projects>
=cut
__PACKAGE__->table("Projects");
__PACKAGE__->table("projects");
=head1 ACCESSORS
@ -303,8 +303,8 @@ Composing rels: L</projectmembers> -> username
__PACKAGE__->many_to_many("usernames", "projectmembers", "username");
# Created by DBIx::Class::Schema::Loader v0.07043 @ 2016-03-11 10:39:17
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:1ats3brIVhRTWLToIYSoaQ
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:pcF/8351zyo9VL6N5eimdQ
my %hint = (
columns => [

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<ReleaseMembers>
=head1 TABLE: C<releasemembers>
=cut
__PACKAGE__->table("ReleaseMembers");
__PACKAGE__->table("releasemembers");
=head1 ACCESSORS
@ -135,7 +135,7 @@ __PACKAGE__->belongs_to(
);
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:7M7WPlGQT6rNHKJ+82/KSA
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:k4z2YeB4gRAeAP6hmR93sQ
1;

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<Releases>
=head1 TABLE: C<releases>
=cut
__PACKAGE__->table("Releases");
__PACKAGE__->table("releases");
=head1 ACCESSORS
@ -119,7 +119,7 @@ __PACKAGE__->has_many(
);
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:qISBiwvboB8dIdinaE45mg
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:b4M/tHOhsy234tgTf+wqjQ
1;

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<SchemaVersion>
=head1 TABLE: C<schemaversion>
=cut
__PACKAGE__->table("SchemaVersion");
__PACKAGE__->table("schemaversion");
=head1 ACCESSORS
@ -45,8 +45,8 @@ __PACKAGE__->table("SchemaVersion");
__PACKAGE__->add_columns("version", { data_type => "integer", is_nullable => 0 });
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:08/7gbEQp1TqBiWFJXVY0w
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:2wy4FsRYVVo2RTCWXcmgvg
# You can replace this text with custom code or comments, and it will be preserved on regeneration

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<StarredJobs>
=head1 TABLE: C<starredjobs>
=cut
__PACKAGE__->table("StarredJobs");
__PACKAGE__->table("starredjobs");
=head1 ACCESSORS
@ -153,8 +153,8 @@ __PACKAGE__->belongs_to(
);
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-10-14 15:46:29
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:naj5aKWuw8hLE6klmvW9Eg
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:fw4FfzmOhzDk0ZoSuNr2ww
# You can replace this text with custom code or comments, and it will be preserved on regeneration

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<SystemStatus>
=head1 TABLE: C<systemstatus>
=cut
__PACKAGE__->table("SystemStatus");
__PACKAGE__->table("systemstatus");
=head1 ACCESSORS
@ -67,8 +67,8 @@ __PACKAGE__->add_columns(
__PACKAGE__->set_primary_key("what");
# Created by DBIx::Class::Schema::Loader v0.07043 @ 2015-07-30 16:01:22
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:JCYi4+HwM22iucdFkhBjMg
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:GeXpTVktMXjHENa/P3qOxw
# You can replace this text with custom code or comments, and it will be preserved on regeneration

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<SystemTypes>
=head1 TABLE: C<systemtypes>
=cut
__PACKAGE__->table("SystemTypes");
__PACKAGE__->table("systemtypes");
=head1 ACCESSORS
@ -68,7 +68,7 @@ __PACKAGE__->add_columns(
__PACKAGE__->set_primary_key("system");
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:8cC34cEw9T3+x+7uRs4KHQ
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:fYeKQQSS5J8rjO3t+Hbz0g
1;

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<UriRevMapper>
=head1 TABLE: C<urirevmapper>
=cut
__PACKAGE__->table("UriRevMapper");
__PACKAGE__->table("urirevmapper");
=head1 ACCESSORS
@ -67,8 +67,8 @@ __PACKAGE__->add_columns(
__PACKAGE__->set_primary_key("baseuri");
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:G2GAF/Rb7cRkRegH94LwIA
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:FOg2/BVJK3yg8MAYMrqZOQ
# You can replace this text with custom content, and it will be preserved on regeneration

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<UserRoles>
=head1 TABLE: C<userroles>
=cut
__PACKAGE__->table("UserRoles");
__PACKAGE__->table("userroles");
=head1 ACCESSORS
@ -87,7 +87,7 @@ __PACKAGE__->belongs_to(
);
# Created by DBIx::Class::Schema::Loader v0.07033 @ 2013-06-13 01:54:50
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:aS+ivlFpndqIv8U578zz9A
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:LUw2PDFvUHs0E0UZ3oHFxw
1;

View file

@ -27,11 +27,11 @@ use base 'DBIx::Class::Core';
__PACKAGE__->load_components("+Hydra::Component::ToJSON");
=head1 TABLE: C<Users>
=head1 TABLE: C<users>
=cut
__PACKAGE__->table("Users");
__PACKAGE__->table("users");
=head1 ACCESSORS
@ -192,8 +192,8 @@ Composing rels: L</projectmembers> -> project
__PACKAGE__->many_to_many("projects", "projectmembers", "project");
# Created by DBIx::Class::Schema::Loader v0.07043 @ 2016-05-27 11:32:14
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Az1+V+ztJoWUt50NLQR3xg
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2020-02-06 12:22:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:4/WZ95asbnGmK+nEHb4sLQ
my %hint = (
columns => [

View file

@ -12,7 +12,7 @@ struct Connection : pqxx::connection
std::string getFlags()
{
using namespace nix;
auto s = getEnv("HYDRA_DBI", "dbi:Pg:dbname=hydra;");
auto s = getEnv("HYDRA_DBI").value_or("dbi:Pg:dbname=hydra;");
std::string prefix = "dbi:Pg:";
if (std::string(s, 0, prefix.size()) != prefix)
throw Error("$HYDRA_DBI does not denote a PostgreSQL database");
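
For illustration, the prefix check boils down to this (a standalone hedged sketch; Hydra's Error type replaced with a standard exception):

    #include <stdexcept>
    #include <string>

    // Strip the Perl-DBI prefix so the rest ("dbname=hydra;...") can be
    // handed to libpqxx as connection parameters.
    std::string dbiToConnectionString(const std::string & dbi)
    {
        const std::string prefix = "dbi:Pg:";
        if (dbi.compare(0, prefix.size(), prefix) != 0)
            throw std::runtime_error("$HYDRA_DBI does not denote a PostgreSQL database");
        return dbi.substr(prefix.size());
    }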

View file

@@ -14,9 +14,9 @@ struct Config
/* Read hydra.conf. */
auto hydraConfigFile = getEnv("HYDRA_CONFIG");
if (pathExists(hydraConfigFile)) {
if (hydraConfigFile && pathExists(*hydraConfigFile)) {
for (auto line : tokenizeString<Strings>(readFile(hydraConfigFile), "\n")) {
for (auto line : tokenizeString<Strings>(readFile(*hydraConfigFile), "\n")) {
line = trim(string(line, 0, line.find('#')));
auto eq = line.find('=');

View file

@@ -186,7 +186,7 @@ END;
IF b.finished && b.buildstatus != 0; nrFailedConstituents = nrFailedConstituents + 1; END;
END;
%];
[%+ IF nrFinished == nrMembers && nrFailedConstituents == 0 %]
[%+ IF nrFinished == nrConstituents && nrFailedConstituents == 0 %]
all [% nrConstituents %] constituent builds succeeded
[% ELSE %]
[% nrFailedConstituents %] out of [% nrConstituents %] constituent builds failed
@@ -292,11 +292,9 @@ END;
<th>Last successful build [% INCLUDE renderDateTime timestamp = prevSuccessfulBuild.timestamp %]</th>
[% IF prevSuccessfulBuild && firstBrokenBuild && firstBrokenBuild.id != build.id %]
<th>First broken build [% INCLUDE renderDateTime timestamp = firstBrokenBuild.timestamp %]
<a class="btn btn-mini" href="[% c.uri_for(c.controller('API').action_for('logdiff') prevSuccessfulBuild.id firstBrokenBuild.id ) %]">log diff</a>
</th>
[% END %]
<th>This build [% INCLUDE renderDateTime timestamp = build.timestamp %]
<a class="btn btn-mini" href="[% c.uri_for(c.controller('API').action_for('logdiff') prevSuccessfulBuild.id build.id) %]">log diff</a>
</th>
</thead>
<tr>

View file

@@ -229,9 +229,9 @@ BLOCK renderBuildStatusIcon;
[% ELSIF buildstatus == 6 %]
<img src="[% c.uri_for("/static/images/emojione-red-x-274c.svg") %]" height="[% size %]" width="[% size %]" title="Failed with output" alt="Failed with output" class="build-status" />
[% ELSIF buildstatus == 7 %]
<img src="[% c.uri_for("/static/images/emojione-red-x-274c.svg") %]" height="[% size %]" width="[% size %]" title="Timed out" alt="Timed out" class="build-status" />
<img src="[% c.uri_for("/static/images/emojione-stopsign-1f6d1.svg") %]" height="[% size %]" width="[% size %]" title="Timed out" alt="Timed out" class="build-status" />
[% ELSIF buildstatus == 10 %]
<img src="[% c.uri_for("/static/images/emojione-red-x-274c.svg") %]" height="[% size %]" width="[% size %]" title="Log limit exceeded" alt="Log limit exceeded" class="build-status" />
<img src="[% c.uri_for("/static/images/emojione-stopsign-1f6d1.svg") %]" height="[% size %]" width="[% size %]" title="Log limit exceeded" alt="Log limit exceeded" class="build-status" />
[% ELSIF buildstatus == 11 %]
<img src="[% c.uri_for("/static/images/emojione-red-x-274c.svg") %]" height="[% size %]" width="[% size %]" title="Output size limit exceeded" alt="Output size limit exceeded" class="build-status" />
[% ELSIF buildstatus == 12 %]

View file

@@ -68,6 +68,7 @@
<input type="hidden" name="enabled" value="[% jobset.enabled %]" />
<button type="button" class="btn" value="1">Enabled</button>
<button type="button" class="btn" value="2">One-shot</button>
<button type="button" class="btn" value="3">One-at-a-time</button>
<button type="button" class="btn" value="0">Disabled</button>
</div>
</div>

View file

@@ -129,7 +129,7 @@
<table class="info-table">
<tr>
<th>State:</th>
<td>[% IF jobset.enabled == 0; "Disabled"; ELSIF jobset.enabled == 1; "Enabled"; ELSIF jobset.enabled == 2; "One-shot"; END %]</td>
<td>[% IF jobset.enabled == 0; "Disabled"; ELSIF jobset.enabled == 1; "Enabled"; ELSIF jobset.enabled == 2; "One-shot"; ELSIF jobset.enabled == 3; "One-at-a-time"; END %]</td>
</tr>
<tr>
<th>Description:</th>

View file

@@ -1,8 +1,8 @@
EXTRA_DIST = \
$(distributable_scripts) \
hydra-eval-guile-jobs.in
$(distributable_scripts)
distributable_scripts = \
hydra-backfill-ids \
hydra-init \
hydra-eval-jobset \
hydra-server \
@@ -16,5 +16,4 @@ distributable_scripts = \
nix-prefetch-hg
bin_SCRIPTS = \
$(distributable_scripts) \
hydra-eval-guile-jobs
$(distributable_scripts)

164
src/script/hydra-backfill-ids Executable file
View file

@@ -0,0 +1,164 @@
#! /usr/bin/env perl
use strict;
use utf8;
use Hydra::Model::DB;
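# Backfill the Jobs.jobset_id and Builds.jobset_id columns introduced
# by schema versions 60 and 61. The work is batched so this (possibly
# very long) migration can run while Hydra keeps serving traffic.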
STDOUT->autoflush();
STDERR->autoflush(1);
binmode STDERR, ":encoding(utf8)";
my $db = Hydra::Model::DB->new();
my $vacuum = $db->storage->dbh->prepare("VACUUM;");
my $dryRun = defined $ENV{'HYDRA_DRY_RUN'};
my $batchSize = 10000;
my $iterationsPerVacuum = 500;
sub backfillJobsJobsetId {
my ($skipLocked) = @_;
my $logPrefix;
if ($skipLocked) {
$logPrefix = "(pass 1/2)";
} else {
$logPrefix = "(pass 2/2)";
}
print STDERR "$logPrefix Backfilling Jobs records where jobset_id is NULL...\n";
my $totalToGoSth = $db->storage->dbh->prepare(<<QUERY);
SELECT COUNT(*) FROM jobs WHERE jobset_id IS NULL
QUERY
$totalToGoSth->execute();
my ($totalToGo) = $totalToGoSth->fetchrow_array;
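# Pass 1 adds FOR UPDATE SKIP LOCKED so rows currently locked by
# other transactions are skipped rather than waited on; pass 2 runs
# without it to pick up whatever pass 1 skipped.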
my $skipLockedStmt = $skipLocked ? "FOR UPDATE SKIP LOCKED" : "";
my $update10kJobs = $db->storage->dbh->prepare(<<QUERY);
UPDATE jobs
SET jobset_id = (
SELECT jobsets.id
FROM jobsets
WHERE jobsets.name = jobs.jobset
AND jobsets.project = jobs.project
)
WHERE (jobs.project, jobs.jobset, jobs.name) in (
SELECT jobsprime.project, jobsprime.jobset, jobsprime.name
FROM jobs jobsprime
WHERE jobsprime.jobset_id IS NULL
$skipLockedStmt
LIMIT ?
);
QUERY
print STDERR "$logPrefix Total Jobs records without a jobset_id: $totalToGo\n";
my $iteration = 0;
my $affected;
do {
$iteration++;
$affected = $update10kJobs->execute($batchSize);
print STDERR "$logPrefix (batch #$iteration; $totalToGo remaining) Jobs.jobset_id: affected $affected rows...\n";
$totalToGo -= $affected;
if ($iteration % $iterationsPerVacuum == 0) {
print STDERR "$logPrefix (batch #$iteration) Vacuuming...\n";
$vacuum->execute();
}
} while ($affected > 0);
if ($skipLocked) {
backfillJobsJobsetId(0);
}
}
sub backfillBuildsJobsetId {
my ($skipLocked) = @_;
my $logPrefix;
if ($skipLocked) {
$logPrefix = "(pass 1/2)";
print STDERR "$logPrefix Backfilling unlocked Builds records where jobset_id is NULL...\n";
} else {
$logPrefix = "(pass 2/2)";
print STDERR "$logPrefix Backfilling all Builds records where jobset_id is NULL...\n";
}
my $skipLockedStmt = $skipLocked ? "FOR UPDATE SKIP LOCKED" : "";
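# Walk Builds in ascending id order, $batchSize rows per statement,
# resuming each batch from the highest id updated so far.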
my $update10kBuilds = $db->storage->dbh->prepare(<<"QUERY");
WITH updateprogress AS (
UPDATE builds
SET jobset_id = (
SELECT jobsets.id
FROM jobsets
WHERE jobsets.name = builds.jobset
AND jobsets.project = builds.project
)
WHERE builds.id in (
SELECT buildprime.id
FROM builds buildprime
WHERE buildprime.jobset_id IS NULL
AND buildprime.id >= ?
ORDER BY buildprime.id
$skipLockedStmt
LIMIT ?
)
RETURNING id
)
SELECT
count(*) AS affected,
max(updateprogress.id) AS highest_id
FROM updateprogress;
QUERY
my $lowestNullIdSth = $db->storage->dbh->prepare(<<QUERY);
SELECT id FROM builds WHERE jobset_id IS NULL ORDER BY id LIMIT 1
QUERY
$lowestNullIdSth->execute();
my ($highestId) = $lowestNullIdSth->fetchrow_array;
my $totalToGoSth = $db->storage->dbh->prepare(<<QUERY);
SELECT COUNT(*) FROM builds WHERE jobset_id IS NULL AND id >= ?
QUERY
$totalToGoSth->execute($highestId);
my ($totalToGo) = $totalToGoSth->fetchrow_array;
print STDERR "$logPrefix Total Builds records without a jobset_id: $totalToGo, starting at $highestId\n";
my $iteration = 0;
my $affected;
do {
my $previousHighId = $highestId;
$iteration++;
$update10kBuilds->execute($highestId, $batchSize);
($affected, $highestId) = $update10kBuilds->fetchrow_array;
print STDERR "$logPrefix (batch #$iteration; $totalToGo remaining) Builds.jobset_id: affected $affected rows; max ID: $previousHighId -> $highestId\n";
$totalToGo -= $affected;
if ($iteration % $iterationsPerVacuum == 0) {
print STDERR "$logPrefix (batch #$iteration) Vacuuming...\n";
$vacuum->execute();
}
} while ($affected > 0);
if ($skipLocked) {
backfillBuildsJobsetId(0);
}
}
die "syntax: $0\n" unless @ARGV == 0;
print STDERR "Beginning with a VACUUM\n";
$vacuum->execute();
backfillJobsJobsetId(1);
backfillBuildsJobsetId(1);
print STDERR "Ending with a VACUUM\n";
$vacuum->execute();
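Note that the script takes no arguments and is safe to re-run: each pass only touches rows whose jobset_id is still NULL, so an interrupted backfill simply resumes where it left off.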

View file

@@ -1,249 +0,0 @@
#!/bin/sh
# Aside from this initial boilerplate, this is actually -*- scheme -*- code.
main="(module-ref (resolve-interface '(hydra-eval-guile-jobs)) 'eval-guile-jobs)"
# Keep the host's GUILE_LOAD_PATH unchanged to allow the installed Guix to
# be used. This moves Guix modules possibly out of control, but solves
# bootstrapping issues.
#
# Use `--fresh-auto-compile' to ignore any available .go, and force
# recompilation. This is because checkouts in the store have mtime set to
# the epoch, and thus .go files look newer, even though they may not
# correspond.
exec ${GUILE:-@GUILE@} --no-auto-compile --fresh-auto-compile \
-l "$0" -c "(apply $main (cdr (command-line)))" "$@"
!#
;;; Copyright © 2012, 2013, 2014 Ludovic Courtès <ludo@gnu.org>
;;;
;;; This file is part of Hydra.
;;;
;;; Hydra is free software: you can redistribute it and/or modify
;;; it under the terms of the GNU General Public License as published by
;;; the Free Software Foundation, either version 3 of the License, or
;;; (at your option) any later version.
;;;
;;; Hydra is distributed in the hope that it will be useful,
;;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;;; GNU General Public License for more details.
;;;
;;; You should have received a copy of the GNU General Public License
;;; along with Hydra. If not, see <http://www.gnu.org/licenses/>.
(define-module (hydra-eval-guile-jobs)
#:use-module (sxml simple)
#:use-module (ice-9 match)
#:use-module (ice-9 regex)
#:use-module (srfi srfi-1)
#:use-module (srfi srfi-11)
#:export (job-evaluations->xml
eval-guile-jobs))
(define (guix-variable module name)
"Dynamically link variable NAME under Guix module MODULE and return it.
Note: this is used instead of `@', because when using `@' in an uncompiled
file, Guile tries to load the module directly as it reads the source, which
fails in our case, leading to the creation of empty (guix ...) modules."
;; TODO: fail with an XML error description
(let ((m (resolve-interface `(guix ,module))))
(module-ref m name)))
(define (%derivation-system drv)
;; XXX: Awful hack to workaround the fact that `derivation-system', which
;; is a macro, cannot be referred to dynamically.
(struct-ref drv 3))
(define strip-store-path
(let* ((store (or (getenv "NIX_STORE_DIR") "/nix/store"))
(store-path-rx
(make-regexp (string-append "^.*" (regexp-quote store)
"/[^-]+-(.+)$"))))
(lambda (path)
(or (and=> (regexp-exec store-path-rx path)
(lambda (match)
(let ((path (match:substring match 1)))
path)))
path))))
(define (derivation-path->name drv)
"Return the base name of DRV, sans hash and `.drv' extension."
(let ((d (strip-store-path drv)))
(if (string-suffix? ".drv" d)
(string-drop-right d 4)
d)))
(define (register-gc-root drv roots-dir)
"Register a permanent garbage collector root under ROOTS-DIR for DRV."
(let ((root (string-append roots-dir "/" (basename drv))))
(unless (file-exists? root)
(symlink drv root))))
(define* (job-evaluations->sxml jobs
#:key gc-roots-dir)
"Return the hydra-eval-jobs SXML form for the result of JOBS, a list of
symbol/thunk pairs."
`(*TOP*
(*PI* xml "version='1.0' encoding='utf-8'")
"\n"
(jobs "\n"
,@(map (match-lambda
(((? symbol? name) . (? thunk? thunk))
(let* ((result (save-module-excursion
(lambda ()
(set-current-module %user-module)
(with-output-to-port (%make-void-port "w")
thunk))))
(drv (assoc-ref result 'derivation)))
(define (opt-attr xml-name name)
(match (assoc name result)
((_ . value)
`((,xml-name ,value)))
(_
'())))
(when gc-roots-dir
;; Register DRV as a GC root so that it's not collected by
;; the time 'hydra-queue-runner' attempts to build it.
(register-gc-root drv gc-roots-dir))
;; XXX: Add <arg ...> tags?
`(job (@ (jobName ,name)
(drvPath ,drv)
,@(opt-attr 'homepage 'home-page)
(license
,(let loop ((license (assoc-ref result 'license)))
(match license
((? struct?)
(struct-ref license 0))
((l ...)
(string-join (map loop l)))
(_ ""))))
,@(opt-attr 'description 'description)
(maintainers
,(string-join (or (assoc-ref result 'maintainers)
'())
", "))
(maxSilent
,(number->string (or (assoc-ref result
'max-silent-time)
3600)))
(timeout
,(number->string (or (assoc-ref result 'timeout)
72000)))
(nixName ,(derivation-path->name drv))
(schedulingPriority
,(number->string (or (assoc-ref result
'scheduling-priority)
10)))
(system
,(call-with-input-file drv
(compose %derivation-system
(guix-variable 'derivations
'read-derivation)))))
;; Resolve Guix modules lazily.
,(map (match-lambda
((name . path)
`(output (@ (name ,name) (path ,path)))))
((guix-variable 'derivations
'derivation-path->output-paths)
drv))
"\n"))))
jobs))))
(define* (job-evaluations->xml jobs port
#:key gc-roots-dir)
(set-port-encoding! port "UTF-8")
(sxml->xml (job-evaluations->sxml jobs #:gc-roots-dir gc-roots-dir)
port))
;;;
;;; Command-line entry point.
;;;
(define (parse-arguments args)
"Traverse ARGS, a list of command-line arguments compatible with
`hydra-eval-jobs', and return the name of the file that defines the jobs, an
expression that returns the entry point in that file (a unary procedure), the
list of name/value pairs passed to that entry point, as well as a GC root
directory or #f."
(define (module-directory dir)
(let ((d (string-append dir "/share/guile/site/2.0")))
(if (file-exists? d)
d
dir)))
(let loop ((args args)
(result '())
(file #f)
(entry 'hydra-jobs)
(roots-dir #f))
(match args
(()
(if (not file)
(error "hydra-eval-guile-jobs: no expression file given")
(values file entry (reverse result) roots-dir)))
(("-I" name=dir rest ...)
(let* ((dir (match (string-tokenize name=dir
(char-set-complement (char-set
#\=)))
((_ dir) dir)
((dir) dir)))
(dir* (module-directory dir)))
(format (current-error-port) "adding `~a' to the load path~%" dir*)
(set! %load-path (cons dir* %load-path))
(set! %load-compiled-path (cons dir* %load-compiled-path)))
(loop rest result file entry roots-dir))
(("--argstr" name value rest ...)
(loop rest (alist-cons (string->symbol name) value result)
file entry roots-dir))
(("--arg" name expr rest ...)
(let ((value (eval (call-with-input-string expr read)
(current-module))))
(loop rest (alist-cons (string->symbol name) value result)
file entry roots-dir)))
(("--gc-roots-dir" dir rest ...)
(loop rest result file entry dir))
(("-j" _ rest ...) ; XXX: what's this?
(loop rest result file entry roots-dir))
(("--entry" expr rest ...) ; entry point, like `guile -e'
(let ((expr (call-with-input-string expr read)))
(loop rest result file expr roots-dir)))
((file rest ...) ; source file that defines the jobs
(loop rest result file entry roots-dir))
(_
(error "hydra-eval-guile-jobs: invalid arguments" args)))))
(define %user-module
;; Hydra user module.
;; TODO: Make it a sandbox.
(let ((m (make-module)))
(beautify-user-module! m)
m))
(define (eval-guile-jobs . args)
(setlocale LC_ALL "")
(let-values (((file entry args gc-roots-dir)
(parse-arguments args)))
(save-module-excursion
(lambda ()
(set-current-module %user-module)
;; The standard output must contain only XML.
(with-output-to-port (%make-void-port "w")
(lambda ()
(primitive-load file)))))
(let* ((entry (eval entry %user-module))
(store ((guix-variable 'store 'open-connection)))
(jobs (entry store args)))
(unless (string? gc-roots-dir)
(format (current-error-port)
"warning: --gc-roots-dir not specified~%"))
(job-evaluations->xml jobs (current-output-port)
#:gc-roots-dir gc-roots-dir))))

View file

@@ -82,7 +82,7 @@ sub getPath {
my $substituter = $config->{eval_substituter};
system("nix", "copy", "--from", $substituter, "--", $path)
system("nix", "--experimental-features", "nix-command", "copy", "--from", $substituter, "--", $path)
if defined $substituter;
return isValidPath($path);
@@ -143,7 +143,7 @@ sub fetchInputSystemBuild {
$jobsetName ||= $jobset->name;
my @latestBuilds = $db->resultset('LatestSucceededForJob')
->search({}, {bind => [$projectName, $jobsetName, $jobName]});
->search({}, {bind => [$jobsetName, $jobName]});
my @validBuilds = ();
foreach my $build (@latestBuilds) {
@@ -264,35 +264,15 @@ sub fetchInput {
sub booleanToString {
my ($exprType, $value) = @_;
my $result;
if ($exprType eq "guile") {
if ($value eq "true") {
$result = "#t";
} else {
$result = "#f";
}
$result = $value;
} else {
$result = $value;
}
return $result;
my ($value) = @_;
return $value;
}
sub buildInputToString {
my ($exprType, $input) = @_;
my $result;
if ($exprType eq "guile") {
$result = "'((file-name . \"" . ${input}->{storePath} . "\")" .
(defined $input->{revision} ? "(revision . \"" . $input->{revision} . "\")" : "") .
(defined $input->{revCount} ? "(revision-count . " . $input->{revCount} . ")" : "") .
(defined $input->{gitTag} ? "(git-tag . \"" . $input->{gitTag} . "\")" : "") .
(defined $input->{shortRev} ? "(short-revision . \"" . $input->{shortRev} . "\")" : "") .
(defined $input->{version} ? "(version . \"" . $input->{version} . "\")" : "") .
")";
} else {
$result = "{ outPath = builtins.storePath " . $input->{storePath} . "" .
my ($input) = @_;
return
"{ outPath = builtins.storePath " . $input->{storePath} . "" .
"; inputType = \"" . $input->{type} . "\"" .
(defined $input->{uri} ? "; uri = \"" . $input->{uri} . "\"" : "") .
(defined $input->{revNumber} ? "; rev = " . $input->{revNumber} . "" : "") .
@@ -305,12 +285,10 @@ sub buildInputToString {
(defined $input->{drvPath} ? "; drvPath = builtins.storePath " . $input->{drvPath} . "" : "") .
";}";
}
return $result;
}
sub inputsToArgs {
my ($inputInfo, $exprType) = @_;
my ($inputInfo) = @_;
my @res = ();
foreach my $input (sort keys %{$inputInfo}) {
@@ -327,14 +305,12 @@
push @res, "--argstr", $input, $alt->{value};
}
elsif ($alt->{type} eq "boolean") {
push @res, "--arg", $input, booleanToString($exprType, $alt->{value});
push @res, "--arg", $input, booleanToString($alt->{value});
}
elsif ($alt->{type} eq "nix") {
die "input type nix only supported for Nix-based jobsets\n" unless $exprType eq "nix";
push @res, "--arg", $input, $alt->{value};
}
elsif ($alt->{type} eq "eval") {
die "input type eval only supported for Nix-based jobsets\n" unless $exprType eq "nix";
my $s = "{ ";
# FIXME: escape $_. But dots should not be escaped.
$s .= "$_ = builtins.storePath ${\$alt->{jobs}->{$_}}; "
@@ -343,7 +319,7 @@
push @res, "--arg", $input, $s;
}
else {
push @res, "--arg", $input, buildInputToString($exprType, $alt);
push @res, "--arg", $input, buildInputToString($alt);
}
}
@@ -352,18 +328,16 @@
sub evalJobs {
my ($inputInfo, $exprType, $nixExprInputName, $nixExprPath) = @_;
my ($inputInfo, $nixExprInputName, $nixExprPath) = @_;
my $nixExprInput = $inputInfo->{$nixExprInputName}->[0]
or die "cannot find the input containing the job expression\n";
my $evaluator = ($exprType eq "guile") ? "hydra-eval-guile-jobs" : "hydra-eval-jobs";
my @cmd = ($evaluator,
my @cmd = ("hydra-eval-jobs",
"<" . $nixExprInputName . "/" . $nixExprPath . ">",
"--gc-roots-dir", getGCRootsDir,
"-j", 1,
inputsToArgs($inputInfo, $exprType));
inputsToArgs($inputInfo));
if (defined $ENV{'HYDRA_DEBUG'}) {
sub escape {
@@ -376,7 +350,7 @@ }
}
(my $res, my $jobsJSON, my $stderr) = captureStdoutStderr(21600, @cmd);
die "$evaluator returned " . ($res & 127 ? "signal $res" : "exit code " . ($res >> 8))
die "hydra-eval-jobs returned " . ($res & 127 ? "signal $res" : "exit code " . ($res >> 8))
. ":\n" . ($stderr ? decode("utf-8", $stderr) : "(no output)\n")
if $res;
@@ -417,7 +391,12 @@ sub checkBuild {
my $build;
txn_do($db, sub {
my $job = $jobset->jobs->update_or_create({ name => $jobName });
my $job = $jobset->jobs->update_or_create({
name => $jobName,
jobset_id => $jobset->id,
project => $jobset->project,
jobset => $jobset->name,
});
# Don't add a build that has already been scheduled for this
# job, or has been built but is still a "current" build for
@@ -464,6 +443,9 @@
# Add the build to the database.
$build = $job->builds->create(
{ timestamp => $time
, project => $jobset->project
, jobset => $jobset->name
, jobset_id => $jobset->id
, description => null($buildInfo->{description})
, license => null($buildInfo->{license})
, homepage => null($buildInfo->{homepage})
@@ -587,7 +569,6 @@ sub checkJobsetWrapped {
$jobset->discard_changes;
$inputInfo->{"declInput"} = [ $declInput ];
}
my $exprType = $jobset->nixexprpath =~ /.scm$/ ? "guile" : "nix";
# Fetch all values for all inputs.
my $checkoutStart = clock_gettime(CLOCK_MONOTONIC);
@@ -613,7 +594,7 @@
# Hash the arguments to hydra-eval-jobs and check the
# JobsetInputHashes to see if the previous evaluation had the same
# inputs. If so, bail out.
my @args = ($jobset->nixexprinput, $jobset->nixexprpath, inputsToArgs($inputInfo, $exprType));
my @args = ($jobset->nixexprinput, $jobset->nixexprpath, inputsToArgs($inputInfo));
my $argsHash = sha256_hex("@args");
my $prevEval = getPrevJobsetEval($db, $jobset, 0);
if (defined $prevEval && $prevEval->hash eq $argsHash && !$dryRun && !$jobset->forceeval) {
@@ -628,7 +609,7 @@
# Evaluate the job expression.
my $evalStart = clock_gettime(CLOCK_MONOTONIC);
my ($jobs, $nixExprInput) = evalJobs($inputInfo, $exprType, $jobset->nixexprinput, $jobset->nixexprpath);
my ($jobs, $nixExprInput) = evalJobs($inputInfo, $jobset->nixexprinput, $jobset->nixexprpath);
my $evalStop = clock_gettime(CLOCK_MONOTONIC);
if ($jobsetsJobset) {
@@ -716,7 +697,7 @@
foreach my $job (values %{$jobs}) {
next unless $job->{constituents};
my $x = $drvPathToId{$job->{drvPath}} or die;
foreach my $drvPath (split / /, $job->{constituents}) {
foreach my $drvPath (@{$job->{constituents}}) {
my $constituent = $drvPathToId{$drvPath};
if (defined $constituent) {
$db->resultset('AggregateConstituents')->update_or_create({aggregate => $x->{id}, constituent => $constituent->{id}});

View file

@@ -44,6 +44,17 @@ my @versions = $db->resultset('SchemaVersion')->all;
die "couldn't get Hydra schema version!" if scalar @versions != 1;
my $schemaVersion = $versions[0]->version;
if ($schemaVersion <= 60) {
print STDERR <<QUOTE;
WARNING: Schema versions 62 and 63 make the nullable jobset_id fields
on Builds and Jobs non-nullable. On big Hydra servers, this
migration will take many hours. Because of that, the
migration is not automatic, and must be performed manually.
To backfill these IDs, run: hydra-backfill-ids
QUOTE
}
for (my $n = $schemaVersion; $n < $maxSchemaVersion; $n++) {
my $m = $n + 1;
print STDERR "upgrading Hydra schema from version $n to $m\n";

View file

@@ -34,6 +34,7 @@ sub sendQueueRunnerStats {
gauge("hydra.queue.steps.unfinished", $json->{nrUnfinishedSteps});
gauge("hydra.queue.steps.finished", $json->{nrStepsDone});
gauge("hydra.queue.steps.retries", $json->{nrRetries});
gauge("hydra.queue.steps.unsupported", $json->{nrUnsupportedSteps});
gauge("hydra.queue.steps.max_retries", $json->{maxNrRetries});
if ($json->{nrStepsDone}) {
gauge("hydra.queue.steps.avg_total_time", $json->{avgStepTime});

View file

@@ -2,7 +2,6 @@ sqldir = $(libexecdir)/hydra/sql
nobase_dist_sql_DATA = \
hydra-postgresql.sql \
hydra.sql \
hydra-sqlite.sql \
test.sql \
upgrade-*.sql \
update-dbix.pl
@@ -10,10 +9,5 @@ nobase_dist_sql_DATA = \
hydra-postgresql.sql: hydra.sql
cpp -P -E -traditional-cpp -DPOSTGRESQL hydra.sql > $@ || rm -f $@
hydra-sqlite.sql: hydra.sql
cpp -P -E -traditional-cpp -DSQLITE hydra.sql > $@ || rm -f $@
update-dbix: hydra-sqlite.sql
rm -f tmp.sqlite
sqlite3 tmp.sqlite < hydra-sqlite.sql
perl -I ../lib -MDBIx::Class::Schema::Loader=make_schema_at,dump_to_dir:../lib update-dbix.pl
update-dbix: hydra-postgresql.sql
./update-dbix-harness.sh

View file

@@ -52,15 +52,16 @@ create table ProjectMembers (
-- describing build jobs.
create table Jobsets (
name text not null,
id serial not null,
project text not null,
description text,
nixExprInput text not null, -- name of the jobsetInput containing the Nix or Guix expression
nixExprPath text not null, -- relative path of the Nix or Guix expression
nixExprInput text, -- name of the jobsetInput containing the Nix or Guix expression
nixExprPath text, -- relative path of the Nix or Guix expression
errorMsg text, -- used to signal the last evaluation error etc. for this jobset
errorTime integer, -- timestamp associated with errorMsg
lastCheckedTime integer, -- last time the evaluator looked at this jobset
triggerTime integer, -- set if we were triggered by a push event
enabled integer not null default 1, -- 0 = disabled, 1 = enabled, 2 = one-shot
enabled integer not null default 1, -- 0 = disabled, 1 = enabled, 2 = one-shot, 3 = one-at-a-time
enableEmail integer not null default 1,
hidden integer not null default 0,
emailOverride text not null,
@@ -70,9 +71,14 @@ create table Jobsets (
fetchErrorMsg text,
forceEval boolean,
startTime integer, -- if jobset is currently running
type integer not null default 0, -- 0 == legacy, 1 == flake
flake text,
check (schedulingShares > 0),
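-- These checks use boolean equality: a jobset is legacy (type 0)
-- exactly when the expression fields are present, and a flake
-- jobset (type 1) exactly when the flake reference is present.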
check ((type = 0) = (nixExprInput is not null and nixExprPath is not null)),
check ((type = 1) = (flake is not null)),
primary key (project, name),
foreign key (project) references Projects(name) on delete cascade on update cascade
foreign key (project) references Projects(name) on delete cascade on update cascade,
constraint Jobsets_id_unique UNIQUE(id)
#ifdef SQLITE
,
foreign key (project, name, nixExprInput) references JobsetInputs(project, jobset, name)
@@ -140,9 +146,11 @@ create table JobsetInputAlts (
create table Jobs (
project text not null,
jobset text not null,
jobset_id integer not null,
name text not null,
primary key (project, jobset, name),
foreign key (jobset_id) references Jobsets(id) on delete cascade,
foreign key (project) references Projects(name) on delete cascade on update cascade,
foreign key (project, jobset) references Jobsets(project, name) on delete cascade on update cascade
);
@@ -162,6 +170,7 @@ create table Builds (
-- Info about the inputs.
project text not null,
jobset text not null,
jobset_id integer not null,
job text not null,
-- Info about the build result.
@@ -181,7 +190,8 @@ create table Builds (
-- Copy of the nixExprInput/nixExprPath fields of the jobset that
-- instantiated this build. Needed if we want to reproduce this
-- build.
-- build. FIXME: this should be stored in JobsetEvals, storing it
-- here is denormal.
nixExprInput text,
nixExprPath text,
@@ -227,6 +237,7 @@ create table Builds (
check (finished = 0 or (stoptime is not null and stoptime != 0)),
check (finished = 0 or (starttime is not null and starttime != 0)),
foreign key (jobset_id) references Jobsets(id) on delete cascade,
foreign key (project) references Projects(name) on update cascade,
foreign key (project, jobset) references Jobsets(project, name) on update cascade,
foreign key (project, jobset, job) references Jobs(project, jobset, name) on update cascade
@@ -522,6 +533,8 @@ create table JobsetEvals (
nrBuilds integer,
nrSucceeded integer, -- set lazily when all builds are finished
flake text, -- immutable flake reference
foreign key (project) references Projects(name) on delete cascade on update cascade,
foreign key (project, jobset) references Jobsets(project, name) on delete cascade on update cascade
);
@@ -669,6 +682,8 @@ create index IndexBuildsOnProject on Builds(project);
create index IndexBuildsOnTimestamp on Builds(timestamp);
create index IndexBuildsOnFinishedStopTime on Builds(finished, stoptime DESC);
create index IndexBuildsOnJobFinishedId on builds(project, jobset, job, system, finished, id DESC);
create index IndexBuildsOnJobsetIdFinishedId on Builds(id DESC, finished, job, jobset_id);
create index IndexFinishedSuccessfulBuilds on Builds(id DESC, buildstatus, finished, job, jobset_id) where buildstatus = 0 and finished = 1;
create index IndexBuildsOnDrvPath on Builds(drvPath);
create index IndexCachedHgInputsOnHash on CachedHgInputs(uri, branch, sha256hash);
create index IndexCachedGitInputsOnHash on CachedGitInputs(uri, branch, sha256hash);

40
src/sql/update-dbix-harness.sh Executable file
View file

@@ -0,0 +1,40 @@
#!/usr/bin/env bash
readonly scratch=$(mktemp -d -t tmp.XXXXXXXXXX)
readonly socket=$scratch/socket
readonly data=$scratch/data
readonly dbname=hydra-update-dbix
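# Tear down the scratch PostgreSQL instance and delete the temporary
# directory on exit, whether or not the run succeeded.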
function finish {
set +e
pg_ctl -D "$data" \
-o "-F -h '' -k \"$socket\"" \
-w stop -m immediate
if [ -f "$data/postmaster.pid" ]; then
pg_ctl -D "$data" \
-o "-F -h '' -k \"$socket\"" \
-w kill TERM "$(cat "$data/postmaster.pid")"
fi
rm -rf "$scratch"
}
trap finish EXIT
set -e
mkdir -p "$socket"
initdb -D "$data"
pg_ctl -D "$data" \
-o "-F -h '' -k \"${socket}\"" \
-w start
createdb -h "$socket" "$dbname"
psql -h "$socket" "$dbname" -f ./hydra-postgresql.sql
perl -I ../lib \
-MDBIx::Class::Schema::Loader=make_schema_at,dump_to_dir:../lib \
update-dbix.pl "dbi:Pg:dbname=$dbname;host=$socket"
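As the Makefile hunk above shows, the update-dbix target now runs this harness: it boots a throwaway PostgreSQL instance in a temporary directory, loads hydra-postgresql.sql into it, and points the schema dumper at that database through a Unix socket.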

View file

@@ -1,8 +1,49 @@
use Cwd;
die "$0: dbi connection string required \n" if scalar @ARGV != 1;
make_schema_at("Hydra::Schema", {
naming => { ALL => "v5" },
relationships => 1,
moniker_map => sub { return "$_"; },
moniker_map => {
"aggregateconstituents" => "AggregateConstituents",
"buildinputs" => "BuildInputs",
"buildmetrics" => "BuildMetrics",
"buildoutputs" => "BuildOutputs",
"buildproducts" => "BuildProducts",
"builds" => "Builds",
"buildstepoutputs" => "BuildStepOutputs",
"buildsteps" => "BuildSteps",
"cachedbazaarinputs" => "CachedBazaarInputs",
"cachedcvsinputs" => "CachedCVSInputs",
"cacheddarcsinputs" => "CachedDarcsInputs",
"cachedgitinputs" => "CachedGitInputs",
"cachedhginputs" => "CachedHgInputs",
"cachedpathinputs" => "CachedPathInputs",
"cachedsubversioninputs" => "CachedSubversionInputs",
"failedpaths" => "FailedPaths",
"jobs" => "Jobs",
"jobsetevalinputs" => "JobsetEvalInputs",
"jobsetevalmembers" => "JobsetEvalMembers",
"jobsetevals" => "JobsetEvals",
"jobsetinputalts" => "JobsetInputAlts",
"jobsetinputs" => "JobsetInputs",
"jobsetrenames" => "JobsetRenames",
"jobsets" => "Jobsets",
"newsitems" => "NewsItems",
"nrbuilds" => "NrBuilds",
"projectmembers" => "ProjectMembers",
"projects" => "Projects",
"releasemembers" => "ReleaseMembers",
"releases" => "Releases",
"schemaversion" => "SchemaVersion",
"starredjobs" => "StarredJobs",
"systemstatus" => "SystemStatus",
"systemtypes" => "SystemTypes",
"urirevmapper" => "UriRevMapper",
"userroles" => "UserRoles",
"users" => "Users",
} , #sub { return "$_"; },
components => [ "+Hydra::Component::ToJSON" ],
rel_name_map => { buildsteps_builds => "buildsteps" }
}, ["dbi:SQLite:tmp.sqlite"]);
}, [$ARGV[0]]);
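update-dbix.pl now takes the DBI connection string as its sole argument (the harness above passes the scratch database and socket), and the explicit moniker_map pins the CamelCase result-class names that the old SQLite-based dump produced, since PostgreSQL reports table names in lowercase.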

7
src/sql/upgrade-58.sql Normal file
View file

@@ -0,0 +1,7 @@
alter table Jobsets alter column nixExprInput drop not null;
alter table Jobsets alter column nixExprPath drop not null;
alter table Jobsets add column type integer default 0;
alter table Jobsets add column flake text;
alter table Jobsets add check ((type = 0) = (nixExprInput is not null and nixExprPath is not null));
alter table Jobsets add check ((type = 1) = (flake is not null));
alter table JobsetEvals add column flake text;

4
src/sql/upgrade-59.sql Normal file
View file

@@ -0,0 +1,4 @@
-- This will automatically assign unique IDs to all existing Jobsets.
ALTER TABLE Jobsets
ADD COLUMN id SERIAL NOT NULL,
ADD CONSTRAINT Jobsets_id_unique UNIQUE (id);

10
src/sql/upgrade-60.sql Normal file
View file

@@ -0,0 +1,10 @@
-- Add the jobset_id columns to the Jobs table. This will go
-- quickly, since the field is nullable. Note this is just part one of
-- this migration. Future steps involve a piecemeal backfilling, and
-- then making the column non-null.
ALTER TABLE Jobs
ADD COLUMN jobset_id integer NULL,
ADD FOREIGN KEY (jobset_id)
REFERENCES Jobsets(id)
ON DELETE CASCADE;

10
src/sql/upgrade-61.sql Normal file
View file

@@ -0,0 +1,10 @@
-- Add the jobset_id columns to the Builds table. This will go
-- quickly, since the field is nullable. Note this is just part one of
-- this migration. Future steps involve a piecemeal backfilling, and
-- then making the column non-null.
ALTER TABLE Builds
ADD COLUMN jobset_id integer NULL,
ADD FOREIGN KEY (jobset_id)
REFERENCES Jobsets(id)
ON DELETE CASCADE;

7
src/sql/upgrade-62.sql Normal file
View file

@@ -0,0 +1,7 @@
-- Make the Jobs.jobset_id column NOT NULL. If this upgrade fails,
-- either the admin didn't run the backfiller or there is a bug. If
-- the admin ran the backfiller and there are null columns, it is
-- very important to figure out where the nullable columns came from.
ALTER TABLE Jobs
ALTER COLUMN jobset_id SET NOT NULL;

7
src/sql/upgrade-63.sql Normal file
View file

@@ -0,0 +1,7 @@
-- Make the Builds.jobset_id column NOT NULL. If this upgrade fails,
-- either the admin didn't run the backfiller or there is a bug. If
-- the admin ran the backfiller and there are null columns, it is
-- very important to figure out where the nullable columns came from.
ALTER TABLE Builds
ALTER COLUMN jobset_id SET NOT NULL;

4
src/sql/upgrade-64.sql Normal file
View file

@@ -0,0 +1,4 @@
-- Index precisely what the latest-finished query looks for.
create index IndexFinishedSuccessfulBuilds
on Builds(id DESC, buildstatus, finished, job, jobset_id)
where buildstatus = 0 and finished = 1;

2
src/sql/upgrade-65.sql Normal file
View file

@@ -0,0 +1,2 @@
-- Add an index like IndexBuildsOnJobFinishedId using jobset_id
create index IndexBuildsOnJobsetIdFinishedId on Builds(id DESC, finished, job, jobset_id);

View file

@@ -14,6 +14,7 @@ TESTS_ENVIRONMENT = \
NIX_BUILD_HOOK= \
PGHOST=/tmp \
PERL5LIB="$(srcdir):$(abs_top_srcdir)/src/lib:$$PERL5LIB" \
PYTHONPATH= \
PATH=$(abs_top_srcdir)/src/hydra-evaluator:$(abs_top_srcdir)/src/script:$(abs_top_srcdir)/src/hydra-eval-jobs:$(abs_top_srcdir)/src/hydra-queue-runner:$$PATH \
perl -w