For completeness, re-implement meta.schedulingPriority
parent b7965df928
commit 576dc0c120
@@ -118,9 +118,29 @@ system_time State::doDispatch()
                     a.currentJobs > b.currentJobs;
             });

-        /* Sort the runnable steps by priority. FIXME: O(n lg n);
-           obviously, it would be better to keep a runnable queue sorted
-           by priority. */
+        /* Sort the runnable steps by priority. Priority is established
+           as follows (in order of precedence):
+
+           - The global priority of the builds that depend on the
+             step. This allows admins to bump a build to the front of
+             the queue.
+
+           - The lowest used scheduling share of the jobsets depending
+             on the step.
+
+           - The local priority of the build, as set via the build's
+             meta.schedulingPriority field. Note that this is not
+             quite correct: the local priority should only be used to
+             establish priority between builds in the same jobset, but
+             here it's used between steps in different jobsets if they
+             happen to have the same lowest used scheduling share. But
+             that's not very likely.
+
+           - The lowest ID of the builds depending on the step;
+             i.e. older builds take priority over new ones.
+
+           FIXME: O(n lg n); obviously, it would be better to keep a
+           runnable queue sorted by priority. */
         std::vector<Step::ptr> runnableSorted;
         {
             auto runnable_(runnable.lock());
@@ -166,6 +186,7 @@ system_time State::doDispatch()
                 return
                     a_->highestGlobalPriority != b_->highestGlobalPriority ? a_->highestGlobalPriority > b_->highestGlobalPriority :
                     a_->lowestShareUsed != b_->lowestShareUsed ? a_->lowestShareUsed < b_->lowestShareUsed :
+                    a_->highestLocalPriority != b_->highestLocalPriority ? a_->highestLocalPriority > b_->highestLocalPriority :
                     a_->lowestBuildID < b_->lowestBuildID;
             });

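To make the precedence rules above concrete, here is a small self-contained sketch (not part of this commit) that applies the same four-key ordering to a throwaway StepInfo struct; the struct, names and sample values are hypothetical, only the comparison mirrors the lambda above:

    // Hypothetical illustration of the four-key ordering used by doDispatch().
    #include <algorithm>
    #include <iostream>
    #include <limits>
    #include <string>
    #include <vector>

    struct StepInfo {
        std::string name;
        int highestGlobalPriority = 0;      // higher wins
        double lowestShareUsed = 1e9;       // lower wins
        int highestLocalPriority = 0;       // higher wins (meta.schedulingPriority)
        unsigned int lowestBuildID =
            std::numeric_limits<unsigned int>::max();   // lower (older) wins
    };

    int main()
    {
        std::vector<StepInfo> steps = {
            {"a", 0, 0.5, 10, 300},
            {"b", 0, 0.5, 10, 100},   // same as "a", but depended on by an older build
            {"c", 1, 0.9,  0, 400},   // bumped by an admin: global priority trumps the rest
        };

        std::sort(steps.begin(), steps.end(), [](const StepInfo & a, const StepInfo & b) {
            return
                a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
                a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
                a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
                a.lowestBuildID < b.lowestBuildID;
        });

        for (auto & s : steps) std::cout << s.name << "\n";   // prints: c, b, a
    }

As the FIXME notes, re-sorting on every dispatch is O(n lg n); keeping the runnable steps in a container ordered by this comparator would avoid the repeated sort, at the cost of re-inserting a step whenever one of its keys changes.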
@@ -67,7 +67,7 @@ void State::getQueuedBuilds(Connection & conn, std::shared_ptr<StoreAPI> store,
    pqxx::work txn(conn);

    auto res = txn.parameterized
-        ("select id, project, jobset, job, drvPath, maxsilent, timeout, timestamp, globalPriority from Builds "
+        ("select id, project, jobset, job, drvPath, maxsilent, timeout, timestamp, globalPriority, priority from Builds "
         "where id > $1 and finished = 0 order by globalPriority desc, id")
        (lastBuildId).exec();

@@ -88,6 +88,7 @@ void State::getQueuedBuilds(Connection & conn, std::shared_ptr<StoreAPI> store,
        build->buildTimeout = row["timeout"].as<int>();
        build->timestamp = row["timestamp"].as<time_t>();
        build->globalPriority = row["globalPriority"].as<int>();
+       build->localPriority = row["priority"].as<int>();
        build->jobset = createJobset(txn, build->projectName, build->jobsetName);

        newIDs.push_back(id);
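For orientation, a minimal sketch of how the amended query and the new priority column fit together, using the same libpqxx calls as the surrounding code; the connection string, the shortened column list, and the loop body are hypothetical and only meant to show the shape of the call:

    #include <pqxx/pqxx>
    #include <iostream>

    int main()
    {
        pqxx::connection conn("dbname=hydra");   // hypothetical connection string
        pqxx::work txn(conn);

        unsigned int lastBuildId = 0;            // start from the beginning of the queue

        /* Same shape as the query above; the commit simply adds the
           per-build 'priority' column (meta.schedulingPriority) to the
           select list. */
        auto res = txn.parameterized
            ("select id, globalPriority, priority from Builds "
             "where id > $1 and finished = 0 order by globalPriority desc, id")
            (lastBuildId).exec();

        for (auto const & row : res)
            std::cout << row["id"].as<unsigned int>() << " "
                      << row["globalPriority"].as<int>() << " "
                      << row["priority"].as<int>() << "\n";
    }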
@@ -259,6 +260,7 @@ void Build::propagatePriorities()
    visitDependencies([&](const Step::ptr & step) {
        auto step_(step->state.lock());
        step_->highestGlobalPriority = std::max(step_->highestGlobalPriority, globalPriority);
+       step_->highestLocalPriority = std::max(step_->highestLocalPriority, localPriority);
        step_->lowestBuildID = std::min(step_->lowestBuildID, id);
        step_->jobsets.insert(jobset);
    }, toplevel);
@@ -105,7 +105,7 @@ struct Build
    std::string projectName, jobsetName, jobName;
    time_t timestamp;
    unsigned int maxSilentTime, buildTimeout;
-   int globalPriority;
+   int localPriority, globalPriority;

    std::shared_ptr<Step> toplevel;

@@ -164,6 +164,10 @@ struct Step
           step. */
        double lowestShareUsed;

+       /* The highest local priority of any build depending on this
+          step. */
+       int highestLocalPriority{0};
+
        /* The lowest ID of any build depending on this step. */
        BuildID lowestBuildID{std::numeric_limits<BuildID>::max()};
    };
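To tie the hunks together: a toy sketch (not Hydra code; the Build and Step below are stripped-down stand-ins for the real structs) of how Build::propagatePriorities() pushes a build's globalPriority, its localPriority taken from meta.schedulingPriority, and its ID down to the steps it depends on, using the same max/min updates and the same field defaults as above:

    #include <algorithm>
    #include <iostream>
    #include <limits>

    using BuildID = unsigned int;

    // Stripped-down stand-ins for the real Build/Step structs.
    struct Step {
        int highestGlobalPriority{0};
        int highestLocalPriority{0};
        BuildID lowestBuildID{std::numeric_limits<BuildID>::max()};
    };

    struct Build {
        BuildID id;
        int globalPriority = 0;
        int localPriority = 0;   // meta.schedulingPriority
    };

    // Same updates as Build::propagatePriorities(), applied to one step.
    void propagate(const Build & build, Step & step)
    {
        step.highestGlobalPriority = std::max(step.highestGlobalPriority, build.globalPriority);
        step.highestLocalPriority  = std::max(step.highestLocalPriority,  build.localPriority);
        step.lowestBuildID         = std::min(step.lowestBuildID,         build.id);
    }

    int main()
    {
        Step shared;              // a derivation two builds depend on
        Build older{100, 0, 5};   // meta.schedulingPriority = 5
        Build newer{200, 0, 1};   // meta.schedulingPriority = 1

        propagate(older, shared);
        propagate(newer, shared);

        // The step inherits the most urgent values of its dependents:
        // highestLocalPriority = 5, lowestBuildID = 100.
        std::cout << shared.highestLocalPriority << " " << shared.lowestBuildID << "\n";
    }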