forked from lix-project/lix
a583a2bc59
On a system with multiple CPUs, running Nix operations through the
daemon is significantly slower than "direct" mode:

    $ NIX_REMOTE= nix-instantiate '<nixos>' -A system
    real    0m0.974s
    user    0m0.875s
    sys     0m0.088s

    $ NIX_REMOTE=daemon nix-instantiate '<nixos>' -A system
    real    0m2.118s
    user    0m1.463s
    sys     0m0.218s

The main reason seems to be that the client and the worker get moved to
a different CPU after every call to the worker. This patch adds a hack
to lock them to the same CPU. With this, the overhead of going through
the daemon is very small:

    $ NIX_REMOTE=daemon nix-instantiate '<nixos>' -A system
    real    0m1.074s
    user    0m0.809s
    sys     0m0.098s
55 lines
1.1 KiB
C++
#include "types.hh"
|
|
#include "util.hh"
|
|
#include "affinity.hh"
|
|
|
|
#if HAVE_SCHED_H
|
|
#include <sched.h>
|
|
#endif
|
|
|
|
namespace nix {
|
|
|
|
|
|
static bool didSaveAffinity = false;
|
|
static cpu_set_t savedAffinity;
|
|
|
|
|
|
void setAffinityTo(int cpu)
|
|
{
|
|
#if HAVE_SCHED_SETAFFINITY
|
|
if (sched_getaffinity(0, sizeof(cpu_set_t), &savedAffinity) == -1) return;
|
|
didSaveAffinity = true;
|
|
printMsg(lvlDebug, format("locking this thread to CPU %1%") % cpu);
|
|
cpu_set_t newAffinity;
|
|
CPU_ZERO(&newAffinity);
|
|
CPU_SET(cpu, &newAffinity);
|
|
if (sched_setaffinity(0, sizeof(cpu_set_t), &newAffinity) == -1)
|
|
printMsg(lvlError, format("failed to lock thread to CPU %1%") % cpu);
|
|
#endif
|
|
}
|
|
|
|
|
|
int lockToCurrentCPU()
|
|
{
|
|
#if HAVE_SCHED_SETAFFINITY
|
|
if (getEnv("NIX_AFFINITY_HACK", "1") == "1") {
|
|
int cpu = sched_getcpu();
|
|
if (cpu != -1) setAffinityTo(cpu);
|
|
return cpu;
|
|
}
|
|
#endif
|
|
return -1;
|
|
}
|
|
|
|
|
|
void restoreAffinity()
|
|
{
|
|
#if HAVE_SCHED_SETAFFINITY
|
|
if (!didSaveAffinity) return;
|
|
if (sched_setaffinity(0, sizeof(cpu_set_t), &savedAffinity) == -1)
|
|
printMsg(lvlError, "failed to restore affinity %1%");
|
|
#endif
|
|
}
|
|
|
|
|
|
}
|
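For reference, the same trick can be seen in isolation. The following is a
minimal, self-contained sketch (not part of this file, Linux-only, assuming
glibc's sched_getcpu() is available): pin the calling process to whatever
CPU it is currently running on, then restore the saved mask afterwards. The
main() scaffolding and messages are purely illustrative.

    #ifndef _GNU_SOURCE
    #define _GNU_SOURCE            /* for sched_getcpu() */
    #endif
    #include <sched.h>
    #include <cstdio>

    int main()
    {
        /* Save the current affinity mask, as setAffinityTo() does. */
        cpu_set_t saved;
        if (sched_getaffinity(0, sizeof(cpu_set_t), &saved) == -1) {
            perror("sched_getaffinity");
            return 1;
        }

        /* Find the CPU we happen to be running on and pin ourselves
           to it, mirroring lockToCurrentCPU(). */
        int cpu = sched_getcpu();
        if (cpu == -1) { perror("sched_getcpu"); return 1; }

        cpu_set_t pinned;
        CPU_ZERO(&pinned);
        CPU_SET(cpu, &pinned);
        if (sched_setaffinity(0, sizeof(cpu_set_t), &pinned) == -1) {
            perror("sched_setaffinity");
            return 1;
        }
        std::printf("pinned to CPU %d\n", cpu);

        /* ... client/daemon round trips would happen here ... */

        /* Undo the pinning, mirroring restoreAffinity(). */
        if (sched_setaffinity(0, sizeof(cpu_set_t), &saved) == -1)
            perror("sched_setaffinity");
        return 0;
    }

Built with e.g. g++ -O2 on Linux, this prints the CPU the process was
pinned to; once both endpoints of a connection are pinned this way, the
scheduler no longer migrates them between calls, which is where the
speedup in the commit message comes from.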