Start of single-process hydra-queue-runner

This commit is contained in:
Eelco Dolstra 2015-05-28 17:39:29 +02:00
parent a91cbefda0
commit dc446c3980
10 changed files with 676 additions and 131 deletions

View file

@ -73,6 +73,7 @@ AC_CONFIG_FILES([
doc/manual/Makefile
src/Makefile
src/hydra-eval-jobs/Makefile
src/hydra-queue-runner/Makefile
src/sql/Makefile
src/xsl/Makefile
src/ttf/Makefile

View file

@ -129,7 +129,7 @@ in rec {
src = tarball;
buildInputs =
[ makeWrapper libtool unzip nukeReferences pkgconfig sqlite
[ makeWrapper libtool unzip nukeReferences pkgconfig sqlite libpqxx
gitAndTools.topGit mercurial darcs subversion bazaar openssl bzip2
guile # optional, for Guile + Guix support
perlDeps perl

View file

@ -1,3 +1,3 @@
SUBDIRS = hydra-eval-jobs sql script lib root xsl ttf
SUBDIRS = hydra-eval-jobs hydra-queue-runner sql script lib root xsl ttf
BOOTCLEAN_SUBDIRS = $(SUBDIRS)
DIST_SUBDIRS = $(SUBDIRS)

View file

@ -0,0 +1,6 @@
# Automake rules for the hydra-queue-runner daemon.
bin_PROGRAMS = hydra-queue-runner

# Link against the Nix libraries and libpqxx (PostgreSQL C++ client).
hydra_queue_runner_SOURCES = hydra-queue-runner.cc build-result.cc
hydra_queue_runner_LDADD = $(NIX_LIBS) -lpqxx

AM_CXXFLAGS = $(NIX_CFLAGS) -Wall

View file

@ -0,0 +1,112 @@
#include "build-result.hh"
#include "store-api.hh"
#include "misc.hh"
#include "util.hh"
using namespace nix;
/* Compute the result metadata of a built derivation: the NAR sizes
   of its outputs and their closure, the declared build products,
   and the release name. All outputs of ‘drv’ are assumed to be
   valid store paths. */
BuildResult getBuildResult(const Derivation & drv)
{
    BuildResult res;

    /* Compute the closure size. */
    PathSet outputs;
    for (auto & output : drv.outputs)
        outputs.insert(output.second.path);
    PathSet closure;
    for (auto & output : outputs)
        computeFSClosure(*store, output, closure);
    for (auto & path : closure) {
        auto info = store->queryPathInfo(path);
        res.closureSize += info.narSize;
        if (outputs.find(path) != outputs.end()) res.size += info.narSize;
    }

    /* Get build products. */
    bool explicitProducts = false;

    for (auto & output : outputs) {
        Path productsFile = output + "/nix-support/hydra-build-products";
        if (!pathExists(productsFile)) continue;
        explicitProducts = true;

        /* For security, resolve symlinks. */
        productsFile = canonPath(productsFile, true);
        if (!isInStore(productsFile)) continue;

        // FIXME: handle I/O errors
        auto contents = readFile(productsFile);

        for (auto & line : tokenizeString<Strings>(contents, "\n")) {
            BuildProduct product;

            /* Each line has the form ‘<type> <subtype> <path>
               [<defaultPath>]’, where <path> may be enclosed in
               double quotes to allow spaces. */
            auto words = tokenizeString<Strings>(line);
            if (words.size() < 3) continue;

            product.type = words.front(); words.pop_front();
            product.subtype = words.front(); words.pop_front();

            if (string(words.front(), 0, 1) == "\"") {
                /* Quoted path: re-join the whitespace-separated
                   tokens up to the closing quote. */
                product.path = string(words.front(), 1);
                words.pop_front();
                while (!words.empty() && (product.path.empty() || product.path.back() != '"')) {
                    product.path += " " + words.front();
                    words.pop_front();
                }
                /* Skip lines with an unterminated quote. */
                if (product.path.empty() || product.path.back() != '"') continue;
                product.path.pop_back();
            } else {
                product.path = words.front(); words.pop_front();
            }

            product.defaultPath = words.empty() ? "" : words.front();

            /* Ensure that the path exists and points into the
               Nix store. */
            if (product.path == "" || product.path[0] != '/') continue;
            product.path = canonPath(product.path, true);
            if (!isInStore(product.path) || !pathExists(product.path)) continue;

            /* FIXME: check that the path is in the input closure
               of the build? */

            product.name = product.path == output ? "" : baseNameOf(product.path);

            struct stat st;
            if (stat(product.path.c_str(), &st))
                throw SysError(format("getting status of %1%") % product.path);

            if (S_ISREG(st.st_mode)) {
                product.isRegular = true;
                product.fileSize = st.st_size;
                product.sha1hash = hashFile(htSHA1, product.path);
                product.sha256hash = hashFile(htSHA256, product.path);
            }

            res.products.push_back(product);
        }
    }

    /* If no build products were explicitly declared, then add all
       outputs as a product of type "nix-build". */
    if (!explicitProducts) {
        for (auto & output : drv.outputs) {
            BuildProduct product;
            product.path = output.second.path;
            product.type = "nix-build";
            product.subtype = output.first == "out" ? "" : output.first;
            product.name = storePathToName(product.path);

            struct stat st;
            if (stat(product.path.c_str(), &st))
                throw SysError(format("getting status of %1%") % product.path);
            if (S_ISDIR(st.st_mode))
                res.products.push_back(product);
        }
    }

    /* Get the release name from $output/nix-support/hydra-release-name. */
    for (auto & output : outputs) {
        Path p = output + "/nix-support/hydra-release-name";
        if (!pathExists(p)) continue;
        // FIXME: handle I/O error
        res.releaseName = trim(readFile(p));
        // FIXME: validate release name
    }

    return res;
}

View file

@ -0,0 +1,25 @@
#pragma once
#include "hash.hh"
#include "derivations.hh"
/* A single product of a build: either declared explicitly in an
   output's nix-support/hydra-build-products file, or an output
   directory itself. */
struct BuildProduct
{
    nix::Path path, defaultPath;
    std::string type, subtype, name;
    bool isRegular = false;            // whether ‘path’ is a regular file
    nix::Hash sha1hash, sha256hash;    // only filled in when ‘isRegular’
    off_t fileSize = 0;                // only filled in when ‘isRegular’
    BuildProduct() { }
};
/* Aggregate result metadata of a build: release name, NAR sizes of
   the outputs (‘size’) and of their closure (‘closureSize’), and
   the list of build products. */
struct BuildResult
{
    std::string releaseName;                     // from nix-support/hydra-release-name, if present
    unsigned long long closureSize = 0, size = 0; // sizes in bytes
    std::list<BuildProduct> products;
};

/* Compute the BuildResult of the (already built) derivation ‘drv’. */
BuildResult getBuildResult(const nix::Derivation & drv);

View file

@ -0,0 +1,515 @@
#include <iostream>
#include <memory>
#include <map>
#include <pqxx/pqxx>
#include "build-result.hh"
#include "store-api.hh"
#include "derivations.hh"
#include "shared.hh"
#include "globals.hh"
using namespace nix;
/* Overall status of a build, stored in the ‘buildStatus’ column of
   the Builds table. */
typedef enum {
    bsSuccess = 0,
    bsFailed = 1,
    bsDepFailed = 2,          // a dependency of the build failed
    bsAborted = 3,
    bsFailedWithOutput = 6,   // failed, but outputs exist (cf. the old Perl code's nix-support/failed check)
} BuildStatus;
/* Status of an individual build step, stored in the ‘status’ column
   of the BuildSteps table. */
typedef enum {
    bssSuccess = 0,
    bssFailed = 1,
    bssAborted = 4,
    bssBusy = 100, // not stored; tells createBuildStep to leave status/stopTime NULL and set busy = 1
} BuildStepStatus;
/* A connection to the Hydra PostgreSQL database. */
struct Connection : pqxx::connection
{
    Connection() : pqxx::connection("dbname=hydra") { };
};

/* Row ID in the Builds table. */
typedef unsigned int BuildID;
/* An unfinished build loaded from the queue, i.e. a row of the
   Builds table with finished = 0. */
struct Build
{
    typedef std::shared_ptr<Build> ptr;
    typedef std::weak_ptr<Build> wptr;

    BuildID id;
    Path drvPath;
    std::map<string, Path> outputs;

    /* Whether this build has already been marked as finished in the
       database (and thus must not be updated again). Uses an
       in-class initializer (as BuildProduct does) instead of a
       hand-written constructor. */
    bool finishedInDB = false;
};
/* A derivation that may need to be built, together with its place
   in the dependency graph of pending steps. */
struct Step
{
    typedef std::shared_ptr<Step> ptr;
    typedef std::weak_ptr<Step> wptr;
    Path drvPath;
    Derivation drv;
    /* The build steps on which this step depends. */
    std::set<Step::ptr> deps;
    /* The build steps that depend on this step. */
    std::vector<Step::wptr> rdeps;
    /* Builds that have this step as the top-level derivation. */
    std::vector<Build::wptr> builds;
};
/* The queue runner's in-memory state: the set of queued builds and
   the dependency graph of build steps derived from them. */
class State
{
private:

    /* The queued builds. */
    std::map<BuildID, Build::ptr> builds;

    /* All active or pending build steps (i.e. dependencies of the
       queued builds). */
    std::map<Path, Step::ptr> steps;

    /* Build steps that have no unbuilt dependencies. */
    std::set<Step::ptr> runnable;

public:
    State();

    /* Marks any still-busy build steps as aborted (best effort). */
    ~State();

    /* Set all BuildSteps rows that are still marked busy to the
       aborted state; ‘stopTime’ == 0 leaves stopTime NULL. */
    void markActiveBuildStepsAsAborted(pqxx::connection & conn, time_t stopTime);

    /* Insert a BuildSteps row (and its BuildStepOutputs rows) for
       ‘step’ under ‘build’; returns the allocated step number. */
    int createBuildStep(pqxx::work & txn, time_t startTime, Build::ptr build, Step::ptr step,
        BuildStepStatus status, const std::string & errorMsg = "", BuildID propagatedFrom = 0);

    /* Mark a previously created build step as no longer busy and
       record its final status. */
    void finishBuildStep(pqxx::work & txn, time_t stopTime, BuildID buildId, int stepNr,
        BuildStepStatus status, const string & errorMsg = "", BuildID propagatedFrom = 0);

    /* NOTE(review): declared here but no definition appears in this
       file — confirm it is defined elsewhere or remove. */
    void updateBuild(pqxx::work & txn, Build::ptr build, BuildStatus status);

    /* Load unfinished builds from the database and create their
       build steps. */
    void getQueuedBuilds(pqxx::connection & conn);

    /* Create (or reuse) the step for ‘drvPath’ and, recursively,
       its input derivations; null if all outputs are valid. */
    Step::ptr createStep(const Path & drvPath);

    /* Remove a finished step from the graph. If ‘proceed’, steps
       that lose their last dependency become runnable; otherwise
       dependent steps are destroyed too. */
    void destroyStep(Step::ptr step, bool proceed);

    /* Get the builds that depend on the given step. */
    std::set<Build::ptr> getDependentBuilds(Step::ptr step);

    /* Build runnable steps until none remain. */
    void doBuildSteps();

    /* Build one step and record the outcome in the database. */
    void doBuildStep(Step::ptr step);

    /* Mark ‘build’ as succeeded and insert its BuildProducts rows. */
    void markSucceededBuild(pqxx::work & txn, Build::ptr build,
        const BuildResult & res, bool isCachedBuild, time_t startTime, time_t stopTime);
};
/* Nothing to initialize: the maps and sets start out empty. */
State::State()
{
}
State::~State()
{
    /* Best effort on shutdown: mark any build steps we still had in
       progress as aborted. Never let an exception escape the
       destructor. */
    try {
        Connection conn;
        printMsg(lvlError, "clearing active build steps...");
        markActiveBuildStepsAsAborted(conn, time(0));
    } catch (...) {
        ignoreException();
    }
}
/* Mark all BuildSteps rows that are still busy as aborted. Called
   at startup (stopTime == 0) and from the destructor
   (stopTime == now). */
void State::markActiveBuildStepsAsAborted(pqxx::connection & conn, time_t stopTime)
{
    pqxx::work txn(conn);
    auto stm = txn.parameterized
        ("update BuildSteps set busy = 0, status = $1, stopTime = $2 where busy = 1")
        ((int) bssAborted);
    /* Invoking the statement without an argument binds NULL for $2
       (the pattern used throughout this file). */
    if (stopTime) stm(stopTime); else stm();
    stm.exec();
    txn.commit();
}
/* Insert a new BuildSteps row (plus one BuildStepOutputs row per
   derivation output) for ‘step’ under ‘build’. Returns the step
   number allocated for it. */
int State::createBuildStep(pqxx::work & txn, time_t startTime, Build::ptr build, Step::ptr step,
    BuildStepStatus status, const std::string & errorMsg, BuildID propagatedFrom)
{
    /* Allocate the next step number for this build. */
    auto res = txn.parameterized("select max(stepnr) from BuildSteps where build = $1")(build->id).exec();
    int stepNr = res[0][0].is_null() ? 1 : res[0][0].as<int>() + 1;

    /* Parameters bind positionally ($1..$11); an invocation without
       an argument leaves the corresponding column NULL. A busy step
       gets NULL status and stopTime; otherwise stopTime is set to
       ‘startTime’ (the step is recorded as already finished). */
    auto stm = txn.parameterized
        ("insert into BuildSteps (build, stepnr, type, drvPath, busy, startTime, system, status, propagatedFrom, errorMsg, stopTime) values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)")
        (build->id)(stepNr)(0)(step->drvPath)(status == bssBusy ? 1 : 0)(startTime)(step->drv.platform);
    if (status == bssBusy) stm(); else stm((int) status);
    if (propagatedFrom) stm(propagatedFrom); else stm();
    if (errorMsg != "") stm(errorMsg); else stm();
    if (status == bssBusy) stm(); else stm(startTime);
    stm.exec();

    for (auto & output : step->drv.outputs)
        txn.parameterized
            ("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)")
            (build->id)(stepNr)(output.first)(output.second.path).exec();

    return stepNr;
}
/* Finish a busy build step: clear its busy flag and record its
   final status, error message, propagation source and stop time. */
void State::finishBuildStep(pqxx::work & txn, time_t stopTime, BuildID buildId, int stepNr,
    BuildStepStatus status, const std::string & errorMsg, BuildID propagatedFrom)
{
    /* Parameters bind positionally in invocation order: $1 = status,
       $2 = buildId, $3 = stepNr, $4 = propagatedFrom, $5 = errorMsg,
       $6 = stopTime. An argument-less invocation binds NULL. */
    auto stm = txn.parameterized
        ("update BuildSteps set busy = 0, status = $1, propagatedFrom = $4, errorMsg = $5, stopTime = $6 where build = $2 and stepnr = $3")
        ((int) status)(buildId)(stepNr);
    if (propagatedFrom) stm(propagatedFrom); else stm();
    if (errorMsg != "") stm(errorMsg); else stm();
    if (stopTime) stm(stopTime); else stm();
    stm.exec();
}
/* Load all unfinished builds from the database and create build
   steps for them. Builds whose derivation has been GC'ed are
   aborted; builds whose outputs are already valid are finished
   immediately as cached builds. */
void State::getQueuedBuilds(pqxx::connection & conn)
{
    pqxx::work txn(conn);

    // FIXME: query only builds with ID higher than the previous
    // highest.
    auto res = txn.exec("select * from Builds where finished = 0");

    // FIXME: don't process inside a txn.
    for (auto const & row : res) {
        BuildID id = row["id"].as<BuildID>();

        /* Skip builds we already know about. */
        if (builds.find(id) != builds.end()) continue;

        Build::ptr build(new Build);
        build->id = id;
        build->drvPath = row["drvPath"].as<string>();

        printMsg(lvlInfo, format("loading build %1% (%2%:%3%:%4%)") % id % row["project"] % row["jobset"] % row["job"]);

        if (!store->isValidPath(build->drvPath)) {
            /* Derivation has been GC'ed prematurely. */
            /* NOTE(review): this opens a second connection (and a
               nested, shadowing ‘txn’) while the outer transaction
               is still open — confirm intended. */
            Connection conn;
            pqxx::work txn(conn);
            txn.parameterized
                ("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $3, errorMsg = $4 where id = $1")
                (build->id)
                ((int) bsAborted)
                (time(0))
                ("derivation was garbage-collected prior to build").exec();
            txn.commit();
            continue;
        }

        Step::ptr step = createStep(build->drvPath);
        if (!step) {
            /* All outputs are already valid: record a cached
               build. */
            Derivation drv = readDerivation(build->drvPath);
            BuildResult res = getBuildResult(drv);

            Connection conn;
            pqxx::work txn(conn);
            time_t now = time(0);
            markSucceededBuild(txn, build, res, true, now, now);
            txn.commit();

            continue;
        }

        /* Register the build as a top-level user of its step. */
        step->builds.push_back(build);
        builds[id] = build;
    }
}
/* Create (or return the previously created) build step for
   ‘drvPath’, recursively creating steps for its input derivations.
   Returns a null pointer if all outputs of the derivation are
   already valid, i.e. nothing needs to be built. */
Step::ptr State::createStep(const Path & drvPath)
{
    /* Reuse an existing step for this derivation, if any. */
    auto prev = steps.find(drvPath);
    if (prev != steps.end()) return prev->second;

    printMsg(lvlInfo, format("considering derivation %1%") % drvPath);

    Step::ptr step(new Step);
    step->drvPath = drvPath;
    step->drv = readDerivation(drvPath);

    /* Are all outputs valid? */
    bool valid = true;
    for (auto & i : step->drv.outputs) {
        if (!store->isValidPath(i.second.path)) {
            valid = false;
            break;
        }
    }

    // FIXME: check whether all outputs are in the binary cache.
    if (valid) return nullptr;

    /* No, we need to build. */
    printMsg(lvlInfo, format("creating build step %1%") % drvPath);

    /* Create steps for the dependencies. */
    for (auto & i : step->drv.inputDrvs) {
        Step::ptr dep = createStep(i.first);
        if (dep) {
            step->deps.insert(dep);
            dep->rdeps.push_back(step);
        }
    }

    steps[drvPath] = step;

    /* A step with no unbuilt dependencies can be built right away. */
    if (step->deps.empty()) runnable.insert(step);

    return step;
}
/* Remove ‘step’ from the graph. If ‘proceed’ is true (the step
   succeeded), reverse dependencies that thereby lose their last
   dependency become runnable; otherwise (failure) all reverse
   dependencies are destroyed recursively as well. */
void State::destroyStep(Step::ptr step, bool proceed)
{
    steps.erase(step->drvPath);

    for (auto & rdep_ : step->rdeps) {
        auto rdep = rdep_.lock();
        if (!rdep) continue; // rdep is already gone
        assert(rdep->deps.find(step) != rdep->deps.end());
        rdep->deps.erase(step);
        if (proceed) {
            /* If this rdep has no other dependencies, then we can now
               build it. */
            if (rdep->deps.empty())
                runnable.insert(rdep);
        } else
            /* If step failed, then delete all dependent steps as
               well. */
            destroyStep(rdep, false);
    }

    /* Sanity check: every build whose top-level derivation is this
       step must already have been marked finished in the database. */
    for (auto & build_ : step->builds) {
        auto build = build_.lock();
        if (!build) continue;
        assert(build->drvPath == step->drvPath);
        assert(build->finishedInDB);
    }
}
/* Get the builds that depend on the given step, by walking the
   reverse-dependency graph from ‘step’ and collecting the builds
   attached to every step reached. Uses an explicit work list
   instead of recursion. */
std::set<Build::ptr> State::getDependentBuilds(Step::ptr step)
{
    std::set<Build::ptr> result;
    std::set<Step::ptr> seen;
    std::vector<Step::ptr> todo{step};

    while (!todo.empty()) {
        Step::ptr cur = todo.back();
        todo.pop_back();

        /* Process each step at most once. */
        if (!seen.insert(cur).second) continue;

        for (auto & weakBuild : cur->builds)
            if (auto b = weakBuild.lock())
                result.insert(b);

        for (auto & weakRdep : cur->rdeps)
            if (auto r = weakRdep.lock())
                todo.push_back(r);
    }

    return result;
}
void State::doBuildSteps()
{
while (!runnable.empty()) {
printMsg(lvlInfo, format("%1% runnable steps") % runnable.size());
Step::ptr step = *runnable.begin();
runnable.erase(step);
doBuildStep(step);
}
}
/* Build one runnable step: record a busy BuildSteps row, run the
   Nix build, then record success (marking top-level builds as
   succeeded) or failure (propagating failure to all dependent
   builds) in the database, and finally remove the step from the
   graph. */
void State::doBuildStep(Step::ptr step)
{
    /* A step only becomes runnable once all its dependencies have
       been built. */
    assert(step->deps.empty());

    /* There can be any number of builds in the database that depend
       on this derivation. Arbitrarily pick one (though preferring
       those build of which this is the top-level derivation) for the
       purpose of creating build steps. We could create a build step
       record for every build, but that could be very expensive
       (e.g. a stdenv derivation can be a dependency of tens of
       thousands of builds), so we don't. */
    Build::ptr build;
    auto builds = getDependentBuilds(step);

    if (builds.empty()) {
        /* Apparently all builds that depend on this derivation are
           gone (e.g. cancelled). So don't bother. */
        printMsg(lvlInfo, format("cancelling build step %1%") % step->drvPath);
        destroyStep(step, true);
        return;
    }

    /* Prefer a build that has this step as its top-level
       derivation. */
    for (auto build2 : builds)
        if (build2->drvPath == step->drvPath) { build = build2; break; }
    if (!build) build = *builds.begin();

    printMsg(lvlInfo, format("performing build step %1% (needed by %2% builds)") % step->drvPath % builds.size());

    /* Create a build step record indicating that we started
       building. */
    Connection conn;
    time_t startTime = time(0);
    int stepNr;
    {
        pqxx::work txn(conn);
        stepNr = createBuildStep(txn, startTime, build, step, bssBusy);
        txn.commit();
    }

    /* Run the actual build through the Nix store. */
    bool success = false;
    std::string errorMsg;
    try {
        store->buildPaths(PathSet({step->drvPath}));
        success = true;
    } catch (Error & e) {
        errorMsg = e.msg();
    }

    time_t stopTime = time(0);

    BuildResult res;
    if (success) res = getBuildResult(step->drv);

    // FIXME: handle failed-with-output
    // FIXME: handle new builds having been added in the meantime.

    {
        pqxx::work txn(conn);

        if (success) {
            finishBuildStep(txn, stopTime, build->id, stepNr, bssSuccess);

            /* Mark all builds of which this derivation is the top
               level as succeeded. */
            for (auto build2_ : step->builds) {
                auto build2 = build2_.lock();
                if (!build2) continue;
                markSucceededBuild(txn, build2, res, false, startTime, stopTime);
            }
        } else {
            /* Create failed build steps for every build that depends
               on this. */
            finishBuildStep(txn, stopTime, build->id, stepNr, bssFailed, errorMsg);

            for (auto build2 : builds) {
                if (build == build2) continue;
                createBuildStep(txn, stopTime, build2, step, bssFailed, errorMsg, build->id);
            }

            /* Mark all builds that depend on this derivation as failed. */
            for (auto build2 : builds) {
                txn.parameterized
                    ("update Builds set finished = 1, isCachedBuild = 0, buildStatus = $2, startTime = $3, stopTime = $4 where id = $1")
                    (build2->id)
                    ((int) (build2->drvPath == step->drvPath ? bsFailed : bsDepFailed))
                    (startTime)
                    (stopTime).exec();
                build2->finishedInDB = true; // FIXME: txn might fail
            }
        }

        txn.commit();
    }

    /* Remove the build step from the graph. On failure this also
       destroys all dependent steps. */
    destroyStep(step, success);
}
/* Mark ‘build’ as finished successfully in the database and insert
   its BuildProducts rows. ‘isCachedBuild’ indicates that the
   outputs were already valid (nothing was actually built). */
void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
    const BuildResult & res, bool isCachedBuild, time_t startTime, time_t stopTime)
{
    /* Parameters bind positionally; an argument-less invocation
       binds NULL (used for an empty release name). */
    auto stm = txn.parameterized
        ("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, size = $5, closureSize = $6, releaseName = $7, isCachedBuild = $8 where id = $1")
        (build->id)
        ((int) bsSuccess)
        (startTime)
        (stopTime)
        (res.size)
        (res.closureSize);
    if (res.releaseName != "") stm(res.releaseName); else stm();
    stm(isCachedBuild ? 1 : 0);
    stm.exec();

    unsigned int productNr = 1;
    for (auto & product : res.products) {
        auto stm = txn.parameterized
            ("insert into BuildProducts (build, productnr, type, subtype, fileSize, sha1hash, sha256hash, path, name, defaultPath) values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)")
            (build->id)
            (productNr++)
            (product.type)
            (product.subtype);
        /* Size and hashes are only recorded for regular files;
           otherwise the columns are left NULL. */
        if (product.isRegular) stm(product.fileSize); else stm();
        if (product.isRegular) stm(printHash(product.sha1hash)); else stm();
        if (product.isRegular) stm(printHash(product.sha256hash)); else stm();
        stm
            (product.path)
            (product.name)
            (product.defaultPath).exec();
    }

    build->finishedInDB = true; // FIXME: txn might fail
}
/* Entry point: open the Nix store, reset build steps left busy by a
   previous run, load the queue and build everything in it. */
int main(int argc, char * * argv)
{
    return handleExceptions(argv[0], [&]() {
        initNix();

        settings.buildVerbosity = lvlVomit;
        settings.useSubstitutes = false;

        store = openStore();

        /* FIXME: need some locking to prevent multiple instances of
           hydra-queue-runner. */
        Connection conn;

        State state;
        /* Steps still marked busy in the DB are stale leftovers from
           a previous (crashed) run. */
        state.markActiveBuildStepsAsAborted(conn, 0);
        state.getQueuedBuilds(conn);
        state.doBuildSteps();
    });
}

View file

@ -22,20 +22,10 @@ use Hydra::Helper::CatalystUtils;
our @ISA = qw(Exporter);
our @EXPORT = qw(
fetchInput evalJobs checkBuild inputsToArgs
getReleaseName addBuildProducts restartBuild
getPrevJobsetEval
restartBuild getPrevJobsetEval
);
# Return the contents of $outPath/nix-support/hydra-release-name
# (with the trailing newline removed), or undef if that file does
# not exist.
sub getReleaseName {
    my ($outPath) = @_;
    my $fn = "$outPath/nix-support/hydra-release-name";
    return undef unless -f $fn;
    my $name = read_file($fn);
    chomp $name;
    return $name;
}
sub parseJobName {
# Parse a job specification of the form `<project>:<jobset>:<job>
# [attrs]'. The project, jobset and attrs may be omitted. The
@ -355,80 +345,6 @@ sub evalJobs {
}
# Create the BuildProducts rows for $build: one row per valid line
# of each output's nix-support/hydra-build-products file, or — when
# no output declares any products — one "nix-build" row per output
# directory.
sub addBuildProducts {
    my ($db, $build) = @_;

    my $productnr = 1;
    my $explicitProducts = 0;
    my $storeDir = $Nix::Config::storeDir . "/";

    foreach my $output ($build->buildoutputs->all) {
        my $outPath = $output->path;
        if (-e "$outPath/nix-support/hydra-build-products") {
            $explicitProducts = 1;

            open LIST, "$outPath/nix-support/hydra-build-products" or die;
            while (<LIST>) {
                # Each line: <type> <subtype> <path> [<defaultPath>],
                # where <path> may be double-quoted to allow spaces.
                /^([\w\-]+)\s+([\w\-]+)\s+("[^"]*"|\S+)(\s+(\S+))?$/ or next;
                my $type = $1;
                my $subtype = $2 eq "none" ? "" : $2;
                my $path = substr($3, 0, 1) eq "\"" ? substr($3, 1, -1) : $3;
                my $defaultPath = $5;

                # Ensure that the path exists and points into the Nix store.
                next unless File::Spec->file_name_is_absolute($path);
                $path = pathIsInsidePrefix($path, $Nix::Config::storeDir);
                next unless defined $path;
                next unless -e $path;

                # FIXME: check that the path is in the input closure
                # of the build?

                # NOTE(review): declares three variables via the comma
                # operator; "my ($fileSize, $sha1, $sha256);" would be
                # the conventional spelling.
                my $fileSize, my $sha1, my $sha256;

                # Size and hashes are only recorded for regular files.
                if (-f $path) {
                    my $st = stat($path) or die "cannot stat $path: $!";
                    $fileSize = $st->size;
                    $sha1 = hashFile("sha1", 0, $path);
                    $sha256 = hashFile("sha256", 0, $path);
                }

                my $name = $path eq $outPath ? "" : basename $path;

                $db->resultset('BuildProducts')->create(
                    { build => $build->id
                    , productnr => $productnr++
                    , type => $type
                    , subtype => $subtype
                    , path => $path
                    , filesize => $fileSize
                    , sha1hash => $sha1
                    , sha256hash => $sha256
                    , name => $name
                    , defaultpath => $defaultPath
                    });
            }
            close LIST;
        }
    }

    # If any output declared explicit products, don't add the
    # implicit per-output products below.
    return if $explicitProducts;

    foreach my $output ($build->buildoutputs->all) {
        my $outPath = $output->path;
        next unless -d $outPath;
        $db->resultset('BuildProducts')->create(
            { build => $build->id
            , productnr => $productnr++
            , type => "nix-build"
            , subtype => $output->name eq "out" ? "" : $output->name
            , path => $outPath
            , name => $build->nixname
            });
    }
}
# Return the most recent evaluation of the given jobset (that
# optionally had new builds), or undefined if no such evaluation
# exists.
@ -501,40 +417,6 @@ sub checkBuild {
my $time = time();
# Are the outputs already in the Nix store? Then add a cached
# build.
my %extraFlags;
my $allValid = 1;
my $buildStatus;
my $releaseName;
foreach my $name (@outputNames) {
my $path = $buildInfo->{outputs}->{$name};
if (isValidPath($path)) {
if (-f "$path/nix-support/failed") {
$buildStatus = 6;
} else {
$buildStatus //= 0;
}
$releaseName //= getReleaseName($path);
} else {
$allValid = 0;
last;
}
}
if ($allValid) {
%extraFlags =
( finished => 1
, iscachedbuild => 1
, buildstatus => $buildStatus
, starttime => $time
, stoptime => $time
, releasename => $releaseName
);
} else {
%extraFlags = ( finished => 0 );
}
# Add the build to the database.
$build = $job->builds->create(
{ timestamp => $time
@ -550,10 +432,10 @@ sub checkBuild {
, nixexprinput => $jobset->nixexprinput
, nixexprpath => $jobset->nixexprpath
, priority => $buildInfo->{schedulingPriority}
, finished => 0
, busy => 0
, locker => ""
, iscurrent => 1
, %extraFlags
});
$build->buildoutputs->create({ name => $_, path => $buildInfo->{outputs}->{$_} })
@ -562,13 +444,7 @@ sub checkBuild {
$buildMap->{$build->id} = { id => $build->id, jobName => $jobName, new => 1, drvPath => $drvPath };
$$jobOutPathMap{$jobName . "\t" . $firstOutputPath} = $build->id;
if ($build->iscachedbuild) {
#print STDERR " marked as cached build ", $build->id, "\n";
addBuildProducts($db, $build);
notifyBuildFinished($plugins, $build, []);
} else {
print STDERR "added build ${\$build->id} (${\$jobset->project->name}:${\$jobset->name}:$jobName)\n";
}
});
return $build;

View file

@ -33,7 +33,11 @@
</td>
<td>
[% IF step.busy == 0;
IF step.stoptime;
INCLUDE renderDuration duration = step.stoptime - step.starttime;
ELSE;
%]?[%
END;
ELSIF build.finished;
INCLUDE renderDuration duration = build.stoptime - step.starttime;
ELSE;
@ -52,8 +56,10 @@
<span class="error">Timed out</span>
[% ELSIF step.status == 8 %]
<span class="error">Cached failure</span>
[% ELSE %]
[% ELSIF step.errormsg %]
<span class="error">Failed: [% HTML.escape(step.errormsg) %]</span>
[% ELSE %]
<span class="error">Failed</span>
[% END %]
[%%] [%+ IF has_log; INCLUDE renderLogLinks url=log inRow=1; END %]
[%+ IF step.propagatedfrom; %](propagated from [% INCLUDE renderBuildIdLink id=step.propagatedfrom.get_column('id') %])[% END %]

View file

@ -159,11 +159,13 @@ create table Builds (
-- Information about scheduled builds.
priority integer not null default 0,
-- FIXME: remove (obsolete with the new queue runner)
busy integer not null default 0, -- true means someone is building this job now
locker text, -- !!! hostname/pid of the process building this job?
logfile text, -- if busy, the path of the logfile
-- FIXME: remove startTime?
startTime integer, -- if busy/finished, time we started
stopTime integer, -- if finished, time we finished
@ -207,6 +209,8 @@ create table BuildOutputs (
);
-- TODO: normalize this. Currently there can be multiple BuildSteps
-- for a single step.
create table BuildSteps (
build integer not null,
stepnr integer not null,