treewide: clang-tidy modernize
commit fd7fd0ad65
parent d3fcedbcf5
@@ -1,8 +1,12 @@
 UseColor: true
 Checks:
     - -*

     - bugprone-*
     # kind of nonsense
     - -bugprone-easily-swappable-parameters
+    # many warnings due to not recognizing `assert` properly
+    - -bugprone-unchecked-optional-access

+    - modernize-*
+    - -modernize-use-trailing-return-type
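The rest of the commit applies the rewrites these checks suggest: typedefs become using-aliases, 0 becomes nullptr, copied parameters become pass-by-value plus std::move, and iterator loops become ranges. As a rough illustration only (hypothetical names, not code from this repository), the typical before/after looks like:

    #include <ctime>
    #include <map>
    #include <string>
    #include <utility>

    // before: typedef std::map<std::string, std::time_t> SeenTimes;
    using SeenTimes = std::map<std::string, std::time_t>;      // modernize-use-using

    struct Entry
    {
        std::string name;

        // before: Entry(const std::string & name) : name(name) { }
        Entry(std::string name) : name(std::move(name)) { }    // modernize-pass-by-value
    };

    // before: returned 0 on a miss
    const std::time_t * lastSeen(const SeenTimes & times, const std::string & name)
    {
        auto it = times.find(name);
        if (it == times.end()) return nullptr;                 // modernize-use-nullptr
        return &it->second;
    }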
@@ -14,11 +14,12 @@
 #include <sys/wait.h>

 #include <boost/format.hpp>
+#include <utility>

 using namespace nix;
 using boost::format;

-typedef std::pair<std::string, std::string> JobsetName;
+using JobsetName = std::pair<std::string, std::string>;

 class JobsetId {
 public:
@@ -28,8 +29,8 @@ class JobsetId
     int id;


-    JobsetId(const std::string & project, const std::string & jobset, int id)
-        : project{ project }, jobset{ jobset }, id{ id }
+    JobsetId(std::string project, std::string jobset, int id)
+        : project{std::move( project )}, jobset{std::move( jobset )}, id{ id }
     {
     }

@@ -41,7 +42,7 @@ class JobsetId
     friend bool operator== (const JobsetId & lhs, const JobsetName & rhs);
     friend bool operator!= (const JobsetId & lhs, const JobsetName & rhs);

-    std::string display() const {
+    [[nodiscard]] std::string display() const {
         return str(format("%1%:%2% (jobset#%3%)") % project % jobset % id);
     }
 };
@@ -92,7 +93,7 @@ struct Evaluator
         Pid pid;
     };

-    typedef std::map<JobsetId, Jobset> Jobsets;
+    using Jobsets = std::map<JobsetId, Jobset>;

     std::optional<JobsetName> evalOne;

@@ -138,7 +139,7 @@ struct Evaluator

             if (evalOne && name != *evalOne) continue;

-            auto res = state->jobsets.try_emplace(name, Jobset{name});
+            auto res = state->jobsets.try_emplace(name, Jobset{.name=name});

             auto & jobset = res.first->second;
             jobset.lastCheckedTime = row["lastCheckedTime"].as<time_t>(0);
@@ -180,7 +181,7 @@ struct Evaluator

     void startEval(State & state, Jobset & jobset)
     {
-        time_t now = time(0);
+        time_t now = time(nullptr);

         printInfo("starting evaluation of jobset ‘%s’ (last checked %d s ago)",
             jobset.name.display(),
@@ -233,7 +234,7 @@ struct Evaluator
             return false;
         }

-        if (jobset.lastCheckedTime + jobset.checkInterval <= time(0)) {
+        if (jobset.lastCheckedTime + jobset.checkInterval <= time(nullptr)) {
             // Time to schedule a fresh evaluation. If the jobset
             // is a ONE_AT_A_TIME jobset, ensure the previous jobset
             // has no remaining, unfinished work.
@@ -306,7 +307,7 @@ struct Evaluator

         /* Put jobsets in order of ascending trigger time, last checked
            time, and name. */
-        std::sort(sorted.begin(), sorted.end(),
+        std::ranges::sort(sorted,
             [](const Jobsets::iterator & a, const Jobsets::iterator & b) {
                 return
                     a->second.triggerTime != b->second.triggerTime
@@ -329,7 +330,7 @@ struct Evaluator

         while (true) {

-            time_t now = time(0);
+            time_t now = time(nullptr);

             std::chrono::seconds sleepTime = std::chrono::seconds::max();

@@ -416,7 +417,7 @@ struct Evaluator
             printInfo("evaluation of jobset ‘%s’ %s",
                 jobset.name.display(), statusToString(status));

-            auto now = time(0);
+            auto now = time(nullptr);

             jobset.triggerTime = notTriggered;
             jobset.lastCheckedTime = now;

@@ -1,5 +1,6 @@
 #include <algorithm>
 #include <cmath>
+#include <ranges>

 #include <sys/types.h>
 #include <sys/stat.h>
@@ -134,8 +135,8 @@ static void copyClosureTo(
     auto sorted = destStore.topoSortPaths(closure);

     StorePathSet missing;
-    for (auto i = sorted.rbegin(); i != sorted.rend(); ++i)
-        if (!present.count(*i)) missing.insert(*i);
+    for (auto & i : std::ranges::reverse_view(sorted))
+        if (!present.count(i)) missing.insert(i);

     printMsg(lvlDebug, "sending %d missing paths", missing.size());

@@ -305,12 +306,12 @@ static BuildResult performBuild(

     time_t startTime, stopTime;

-    startTime = time(0);
+    startTime = time(nullptr);
     {
         MaintainCount<counter> mc(nrStepsBuilding);
         result = ServeProto::Serialise<BuildResult>::read(localStore, conn);
     }
-    stopTime = time(0);
+    stopTime = time(nullptr);

     if (!result.startTime) {
         // If the builder gave `startTime = 0`, use our measurements
@@ -339,10 +340,10 @@ static BuildResult performBuild(
             // were known
             assert(outputPath);
             auto outputHash = outputHashes.at(outputName);
-            auto drvOutput = DrvOutput { outputHash, outputName };
+            auto drvOutput = DrvOutput { .drvHash=outputHash, .outputName=outputName };
             result.builtOutputs.insert_or_assign(
                 std::move(outputName),
-                Realisation { drvOutput, *outputPath });
+                Realisation { .id=drvOutput, .outPath=*outputPath });
         }
     }

@@ -635,7 +636,7 @@ void State::buildRemote(ref<Store> destStore,
         * copying outputs and we end up building too many things that we
         * haven't been able to allow copy slots for. */
        assert(reservation.unique());
-       reservation = 0;
+       reservation = nullptr;
        wakeDispatcher();

        StorePathSet outputs;

@@ -58,7 +58,7 @@ void State::builder(MachineReservation::ptr reservation)
     /* If the machine hasn't been released yet, release and wake up the dispatcher. */
     if (reservation) {
         assert(reservation.unique());
-        reservation = 0;
+        reservation = nullptr;
         wakeDispatcher();
     }

@@ -190,7 +190,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
        }
    });

-    time_t stepStartTime = result.startTime = time(0);
+    time_t stepStartTime = result.startTime = time(nullptr);

    /* If any of the outputs have previously failed, then don't bother
       building again. */
@@ -237,7 +237,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
        }
    }

-    time_t stepStopTime = time(0);
+    time_t stepStopTime = time(nullptr);
    if (!result.stopTime) result.stopTime = stepStopTime;

    /* For standard failures, we don't care about the error

@@ -190,7 +190,7 @@ system_time State::doDispatch()
            }
        }

-        sort(runnableSorted.begin(), runnableSorted.end(),
+        std::ranges::sort(runnableSorted,
            [](const StepInfo & a, const StepInfo & b)
            {
                return
@@ -240,7 +240,7 @@ system_time State::doDispatch()
           - Then by speed factor.

           - Finally by load. */
-        sort(machinesSorted.begin(), machinesSorted.end(),
+        std::ranges::sort(machinesSorted,
            [](const MachineInfo & a, const MachineInfo & b) -> bool
            {
                float ta = std::round(static_cast<float>(a.currentJobs) / a.machine->speedFactorFloat);
@@ -345,7 +345,7 @@ void State::abortUnsupported()
    auto machines2 = *machines.lock();

    system_time now = std::chrono::system_clock::now();
-    auto now2 = time(0);
+    auto now2 = time(nullptr);

    std::unordered_set<Step::ptr> aborted;

@@ -436,7 +436,7 @@ void Jobset::addStep(time_t startTime, time_t duration)

 void Jobset::pruneSteps()
 {
-    time_t now = time(0);
+    time_t now = time(nullptr);
     auto steps_(steps.lock());
     while (!steps_->empty()) {
         auto i = steps_->begin();
@@ -464,7 +464,7 @@ State::MachineReservation::~MachineReservation()
    auto prev = machine->state->currentJobs--;
    assert(prev);
    if (prev == 1)
-        machine->state->idleSince = time(0);
+        machine->state->idleSince = time(nullptr);

    {
        auto machineTypes_(state.machineTypes.lock());

@@ -14,7 +14,7 @@ struct BuildProduct
     bool isRegular = false;
     std::optional<nix::Hash> sha256hash;
     std::optional<off_t> fileSize;
-    BuildProduct() { }
+    BuildProduct() = default;
 };

 struct BuildMetric

@@ -138,7 +138,7 @@ nix::MaintainCount<counter> State::startDbUpdate()
 {
     if (nrActiveDbUpdates > 6)
         printError("warning: %d concurrent database updates; PostgreSQL may be stalled", nrActiveDbUpdates.load());
-    return MaintainCount<counter>(nrActiveDbUpdates);
+    return {nrActiveDbUpdates};
 }


@@ -171,7 +171,7 @@ void State::parseMachines(const std::string & contents)
        for (auto & f : mandatoryFeatures)
            supportedFeatures.insert(f);

-        using MaxJobs = std::remove_const<decltype(nix::Machine::maxJobs)>::type;
+        using MaxJobs = std::remove_const_t<decltype(nix::Machine::maxJobs)>;

        auto machine = std::make_shared<::Machine>(::Machine {{
            // `storeUri`, not yet used
@@ -594,7 +594,7 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock()
     createDirs(dirOf(lockPath));

     auto lock = std::make_shared<PathLocks>();
-    if (!lock->lockPaths(PathSet({lockPath}), "", false)) return 0;
+    if (!lock->lockPaths(PathSet({lockPath}), "", false)) return nullptr;

     return lock;
 }
@@ -602,10 +602,10 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock()

 void State::dumpStatus(Connection & conn)
 {
-    time_t now = time(0);
+    time_t now = time(nullptr);
     json statusJson = {
         {"status", "up"},
-        {"time", time(0)},
+        {"time", time(nullptr)},
         {"uptime", now - startedAt},
         {"pid", getpid()},

@@ -706,7 +706,7 @@ void State::dumpStatus(Connection & conn)
            };
            if (i.second.runnable > 0)
                machineTypeJson["waitTime"] = i.second.waitTime.count() +
-                    i.second.runnable * (time(0) - lastDispatcherCheck);
+                    i.second.runnable * (time(nullptr) - lastDispatcherCheck);
            if (i.second.running == 0)
                machineTypeJson["lastActive"] = std::chrono::system_clock::to_time_t(i.second.lastActive);
        }
@@ -848,7 +848,7 @@ void State::run(BuildID buildOne)
     /* Can't be bothered to shut down cleanly. Goodbye! */
     auto callback = createInterruptCallback([&]() { std::_Exit(0); });

-    startedAt = time(0);
+    startedAt = time(nullptr);
     this->buildOne = buildOne;

     auto lock = acquireGlobalLock();

@@ -3,6 +3,7 @@
 #include "archive.hh"

 #include <unordered_set>
+#include <utility>

 using namespace nix;

@@ -18,8 +19,8 @@ struct Extractor : ParseSink
     NarMemberData * curMember = nullptr;
     Path prefix;

-    Extractor(NarMemberDatas & members, const Path & prefix)
-        : members(members), prefix(prefix)
+    Extractor(NarMemberDatas & members, Path prefix)
+        : members(members), prefix(std::move(prefix))
     { }

     void createDirectory(const Path & path) override

@@ -13,7 +13,7 @@ struct NarMemberData
     std::optional<nix::Hash> sha256;
 };

-typedef std::map<nix::Path, NarMemberData> NarMemberDatas;
+using NarMemberDatas = std::map<nix::Path, NarMemberData>;

 /* Read a NAR from a source and get to some info about every file
    inside the NAR. */

@@ -4,7 +4,8 @@
 #include "thread-pool.hh"

 #include <cstring>
-#include <signal.h>
+#include <utility>
+#include <csignal>

 using namespace nix;

@@ -88,7 +89,7 @@ void State::queueMonitorLoop(Connection & conn)

 struct PreviousFailure : public std::exception {
     Step::ptr step;
-    PreviousFailure(Step::ptr step) : step(step) { }
+    PreviousFailure(Step::ptr step) : step(std::move(step)) { }
 };


@@ -117,7 +118,7 @@ bool State::getQueuedBuilds(Connection & conn,

        for (auto const & row : res) {
            auto builds_(builds.lock());
-            BuildID id = row["id"].as<BuildID>();
+            auto id = row["id"].as<BuildID>();
            if (buildOne && id != buildOne) continue;
            if (builds_->count(id)) continue;

@@ -137,7 +138,7 @@ bool State::getQueuedBuilds(Connection & conn,

            newIDs.push_back(id);
            newBuildsByID[id] = build;
-            newBuildsByPath.emplace(std::make_pair(build->drvPath, id));
+            newBuildsByPath.emplace(build->drvPath, id);
        }
    }

@@ -162,7 +163,7 @@ bool State::getQueuedBuilds(Connection & conn,
                ("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $3 where id = $1 and finished = 0",
                 build->id,
                 (int) bsAborted,
-                 time(0));
+                 time(nullptr));
            txn.commit();
            build->finishedInDB = true;
            nrBuildsDone++;
@@ -176,7 +177,7 @@ bool State::getQueuedBuilds(Connection & conn,
        /* Create steps for this derivation and its dependencies. */
        try {
            step = createStep(destStore, conn, build, build->drvPath,
-                build, 0, finishedDrvs, newSteps, newRunnable);
+                build, nullptr, finishedDrvs, newSteps, newRunnable);
        } catch (PreviousFailure & ex) {

            /* Some step previously failed, so mark the build as
@@ -221,7 +222,7 @@ bool State::getQueuedBuilds(Connection & conn,
                 "where id = $1 and finished = 0",
                 build->id,
                 (int) (ex.step->drvPath == build->drvPath ? bsFailed : bsDepFailed),
-                 time(0));
+                 time(nullptr));
            notifyBuildFinished(txn, build->id, {});
            txn.commit();
            build->finishedInDB = true;
@@ -254,7 +255,7 @@ bool State::getQueuedBuilds(Connection & conn,
        {
            auto mc = startDbUpdate();
            pqxx::work txn(conn);
-            time_t now = time(0);
+            time_t now = time(nullptr);
            if (!buildOneDone && build->id == buildOne) buildOneDone = true;
            printMsg(lvlInfo, "marking build %1% as succeeded (cached)", build->id);
            markSucceededBuild(txn, build, res, true, now, now);
@@ -438,7 +439,7 @@ Step::ptr State::createStep(ref<Store> destStore,
    Build::ptr referringBuild, Step::ptr referringStep, std::set<StorePath> & finishedDrvs,
    std::set<Step::ptr> & newSteps, std::set<Step::ptr> & newRunnable)
 {
-    if (finishedDrvs.find(drvPath) != finishedDrvs.end()) return 0;
+    if (finishedDrvs.find(drvPath) != finishedDrvs.end()) return nullptr;

    /* Check if the requested step already exists. If not, create a
       new step. In any case, make the step reachable from
@@ -516,7 +517,7 @@ Step::ptr State::createStep(ref<Store> destStore,
        std::map<DrvOutput, std::optional<StorePath>> paths;
        for (auto & [outputName, maybeOutputPath] : destStore->queryPartialDerivationOutputMap(drvPath, &*localStore)) {
            auto outputHash = outputHashes.at(outputName);
-            paths.insert({{outputHash, outputName}, maybeOutputPath});
+            paths.insert({{.drvHash=outputHash, .outputName=outputName}, maybeOutputPath});
        }

        auto missing = getMissingRemotePaths(destStore, paths);
@@ -560,7 +561,7 @@ Step::ptr State::createStep(ref<Store> destStore,
            auto & path = *pathOpt;

            try {
-                time_t startTime = time(0);
+                time_t startTime = time(nullptr);

                if (localStore->isValidPath(path))
                    printInfo("copying output ‘%1%’ of ‘%2%’ from local store",
@@ -578,7 +579,7 @@ Step::ptr State::createStep(ref<Store> destStore,
                    StorePathSet { path },
                    NoRepair, CheckSigs, NoSubstitute);

-                time_t stopTime = time(0);
+                time_t stopTime = time(nullptr);

                {
                    auto mc = startDbUpdate();
@@ -602,7 +603,7 @@ Step::ptr State::createStep(ref<Store> destStore,
    // FIXME: check whether all outputs are in the binary cache.
    if (valid) {
        finishedDrvs.insert(drvPath);
-        return 0;
+        return nullptr;
    }

    /* No, we need to build. */
@@ -610,7 +611,7 @@ Step::ptr State::createStep(ref<Store> destStore,

    /* Create steps for the dependencies. */
    for (auto & i : step->drv->inputDrvs.map) {
-        auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable);
+        auto dep = createStep(destStore, conn, build, i.first, nullptr, step, finishedDrvs, newSteps, newRunnable);
        if (dep) {
            auto step_(step->state.lock());
            step_->deps.insert(dep);
@@ -658,11 +659,11 @@ Jobset::ptr State::createJobset(pqxx::work & txn,
    auto res2 = txn.exec_params
        ("select s.startTime, s.stopTime from BuildSteps s join Builds b on build = id "
         "where s.startTime is not null and s.stopTime > $1 and jobset_id = $2",
-         time(0) - Jobset::schedulingWindow * 10,
+         time(nullptr) - Jobset::schedulingWindow * 10,
         jobsetID);
    for (auto const & row : res2) {
-        time_t startTime = row["startTime"].as<time_t>();
-        time_t stopTime = row["stopTime"].as<time_t>();
+        auto startTime = row["startTime"].as<time_t>();
+        auto stopTime = row["stopTime"].as<time_t>();
        jobset->addStep(startTime, stopTime - startTime);
    }

@@ -702,7 +703,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
             "where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1",
             localStore->printStorePath(output));
        if (r.empty()) continue;
-        BuildID id = r[0][0].as<BuildID>();
+        auto id = r[0][0].as<BuildID>();

        printInfo("reusing build %d", id);

@@ -8,6 +8,7 @@
 #include <queue>
 #include <regex>
 #include <semaphore>
+#include <utility>

 #include <prometheus/counter.h>
 #include <prometheus/gauge.h>
@@ -26,16 +27,16 @@
 #include "machines.hh"


-typedef unsigned int BuildID;
+using BuildID = unsigned int;

-typedef unsigned int JobsetID;
+using JobsetID = unsigned int;

-typedef std::chrono::time_point<std::chrono::system_clock> system_time;
+using system_time = std::chrono::time_point<std::chrono::system_clock>;

-typedef std::atomic<unsigned long> counter;
+using counter = std::atomic<unsigned long>;


-typedef enum {
+enum BuildStatus {
     bsSuccess = 0,
     bsFailed = 1,
     bsDepFailed = 2, // builds only
@@ -49,10 +50,10 @@ typedef enum {
     bsNarSizeLimitExceeded = 11,
     bsNotDeterministic = 12,
     bsBusy = 100, // not stored
-} BuildStatus;
+};


-typedef enum {
+enum StepState {
     ssPreparing = 1,
     ssConnecting = 10,
     ssSendingInputs = 20,
@@ -60,7 +61,7 @@ typedef enum {
     ssWaitingForLocalSlot = 35,
     ssReceivingOutputs = 40,
     ssPostProcessing = 50,
-} StepState;
+};


 struct RemoteResult
@@ -78,7 +79,7 @@ struct RemoteResult
     unsigned int overhead = 0;
     nix::Path logFile;

-    BuildStatus buildStatus() const
+    [[nodiscard]] BuildStatus buildStatus() const
     {
         return stepStatus == bsCachedFailure ? bsFailed : stepStatus;
     }
@@ -95,8 +96,8 @@ class Jobset
 {
 public:

-    typedef std::shared_ptr<Jobset> ptr;
-    typedef std::weak_ptr<Jobset> wptr;
+    using ptr = std::shared_ptr<Jobset>;
+    using wptr = std::weak_ptr<Jobset>;

     static const time_t schedulingWindow = static_cast<time_t>(24 * 60 * 60);

@@ -131,8 +132,8 @@ public:

 struct Build
 {
-    typedef std::shared_ptr<Build> ptr;
-    typedef std::weak_ptr<Build> wptr;
+    using ptr = std::shared_ptr<Build>;
+    using wptr = std::weak_ptr<Build>;

     BuildID id;
     nix::StorePath drvPath;
@@ -163,8 +164,8 @@ struct Build

 struct Step
 {
-    typedef std::shared_ptr<Step> ptr;
-    typedef std::weak_ptr<Step> wptr;
+    using ptr = std::shared_ptr<Step>;
+    using wptr = std::weak_ptr<Step>;

     nix::StorePath drvPath;
     std::unique_ptr<nix::Derivation> drv;
@@ -221,13 +222,8 @@ struct Step

     nix::Sync<State> state;

-    Step(const nix::StorePath & drvPath) : drvPath(drvPath)
+    Step(nix::StorePath drvPath) : drvPath(std::move(drvPath))
     { }
-
-    ~Step()
-    {
-        //printMsg(lvlError, format("destroying step %1%") % drvPath);
-    }
 };


@@ -239,7 +235,7 @@ void visitDependencies(std::function<void(Step::ptr)> visitor, Step::ptr step);

 struct Machine : nix::Machine
 {
-    typedef std::shared_ptr<Machine> ptr;
+    using ptr = std::shared_ptr<Machine>;

     /* TODO Get rid of: `nix::Machine::storeUri` is normalized in a way
        we are not yet used to, but once we are, we don't need this. */
@@ -254,7 +250,7 @@ struct Machine : nix::Machine
     float speedFactorFloat = 1.0;

     struct State {
-        typedef std::shared_ptr<State> ptr;
+        using ptr = std::shared_ptr<State>;
         counter currentJobs{0};
         counter nrStepsDone{0};
         counter totalStepTime{0}; // total time for steps, including closure copying
@@ -358,22 +354,22 @@ private:
     bool useSubstitutes = false;

     /* The queued builds. */
-    typedef std::map<BuildID, Build::ptr> Builds;
+    using Builds = std::map<BuildID, Build::ptr>;
     nix::Sync<Builds> builds;

     /* The jobsets. */
-    typedef std::map<std::pair<std::string, std::string>, Jobset::ptr> Jobsets;
+    using Jobsets = std::map<std::pair<std::string, std::string>, Jobset::ptr>;
     nix::Sync<Jobsets> jobsets;

     /* All active or pending build steps (i.e. dependencies of the
        queued builds). Note that these are weak pointers. Steps are
        kept alive by being reachable from Builds or by being in
        progress. */
-    typedef std::map<nix::StorePath, Step::wptr> Steps;
+    using Steps = std::map<nix::StorePath, Step::wptr>;
     nix::Sync<Steps> steps;

     /* Build steps that have no unbuilt dependencies. */
-    typedef std::list<Step::wptr> Runnable;
+    using Runnable = std::list<Step::wptr>;
     nix::Sync<Runnable> runnable;

     /* CV for waking up the dispatcher. */
@@ -385,7 +381,7 @@ private:

     /* The build machines. */
     std::mutex machinesReadyLock;
-    typedef std::map<std::string, Machine::ptr> Machines;
+    using Machines = std::map<std::string, Machine::ptr>;
     nix::Sync<Machines> machines; // FIXME: use atomic_shared_ptr

     /* Throttler for CPU-bound local work. */
@@ -431,7 +427,7 @@ private:

     struct MachineReservation
     {
-        typedef std::shared_ptr<MachineReservation> ptr;
+        using ptr = std::shared_ptr<MachineReservation>;
         State & state;
         Step::ptr step;
         Machine::ptr machine;