diff --git a/src/libexpr/eval-inline.hh b/src/libexpr/eval-inline.hh
index aef1f6351..3331a7643 100644
--- a/src/libexpr/eval-inline.hh
+++ b/src/libexpr/eval-inline.hh
@@ -24,6 +24,76 @@ LocalNoInlineNoReturn(void throwTypeError(const Pos & pos, const char * s, const
 }
 
 
+/* Note: Various places expect the allocated memory to be zeroed. */
+[[gnu::always_inline]]
+inline void * allocBytes(size_t n)
+{
+    void * p;
+#if HAVE_BOEHMGC
+    p = GC_MALLOC(n);
+#else
+    p = calloc(n, 1);
+#endif
+    if (!p) throw std::bad_alloc();
+    return p;
+}
+
+
+[[gnu::always_inline]]
+Value * EvalState::allocValue()
+{
+    /* We use the boehm batch allocator to speed up allocations of Values (of which there are many).
+       GC_malloc_many returns a linked list of objects of the given size, where the first word of
+       each object is also the pointer to the next object in the list. This also means that we have
+       to explicitly clear the first word of every object we take. */
+    if (!*valueAllocCache) {
+        *valueAllocCache = GC_malloc_many(sizeof(Value));
+        if (!*valueAllocCache) throw std::bad_alloc();
+    }
+
+    /* GC_NEXT is a convenience macro for accessing the first word of an object.
+       Take the first list item, advance the list to the next item, and clear the next pointer. */
+    void * p = *valueAllocCache;
+    *valueAllocCache = GC_NEXT(p);
+    GC_NEXT(p) = nullptr;
+
+    nrValues++;
+    return (Value *) p;
+}
+
+
+[[gnu::always_inline]]
+Env & EvalState::allocEnv(size_t size)
+{
+    nrEnvs++;
+    nrValuesInEnvs += size;
+
+    Env * env;
+
+    if (size != 1)
+        env = (Env *) allocBytes(sizeof(Env) + size * sizeof(Value *));
+    else {
+        /* see allocValue for explanations. */
+        if (!*env1AllocCache) {
+            *env1AllocCache = GC_malloc_many(sizeof(Env) + sizeof(Value *));
+            if (!*env1AllocCache) throw std::bad_alloc();
+        }
+
+        void * p = *env1AllocCache;
+        *env1AllocCache = GC_NEXT(p);
+        GC_NEXT(p) = nullptr;
+        env = (Env *) p;
+    }
+
+    env->type = Env::Plain;
+
+    /* We assume that env->values has been cleared by the allocator; maybeThunk() and lookupVar fromWith expect this. */
+
+    return *env;
+}
+
+
+[[gnu::always_inline]]
 void EvalState::forceValue(Value & v, const Pos & pos)
 {
     forceValue(v, [&]() { return pos; });
@@ -52,6 +122,7 @@ void EvalState::forceValue(Value & v, Callable getPos)
 }
 
 
+[[gnu::always_inline]]
 inline void EvalState::forceAttrs(Value & v, const Pos & pos)
 {
     forceAttrs(v, [&]() { return pos; });
@@ -59,6 +130,7 @@ inline void EvalState::forceAttrs(Value & v, const Pos & pos)
 
 
 template <typename Callable>
+[[gnu::always_inline]]
 inline void EvalState::forceAttrs(Value & v, Callable getPos)
 {
     forceValue(v, getPos);
@@ -67,6 +139,7 @@ inline void EvalState::forceAttrs(Value & v, Callable getPos)
 }
 
 
+[[gnu::always_inline]]
 inline void EvalState::forceList(Value & v, const Pos & pos)
 {
     forceValue(v, pos);
@@ -74,18 +147,5 @@ inline void EvalState::forceList(Value & v, const Pos & pos)
         throwTypeError(pos, "value is %1% while a list was expected", v);
 }
 
-/* Note: Various places expect the allocated memory to be zeroed. */
-inline void * allocBytes(size_t n)
-{
-    void * p;
-#if HAVE_BOEHMGC
-    p = GC_MALLOC(n);
-#else
-    p = calloc(n, 1);
-#endif
-    if (!p) throw std::bad_alloc();
-    return p;
-}
-
 
 }
diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc
index ddbbdeaa6..038b6bb7c 100644
--- a/src/libexpr/eval.cc
+++ b/src/libexpr/eval.cc
@@ -867,59 +867,6 @@ inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval)
 }
 
 
-Value * EvalState::allocValue()
-{
-    /* We use the boehm batch allocator to speed up allocations of Values (of which there are many).
-       GC_malloc_many returns a linked list of objects of the given size, where the first word of
-       each object is also the pointer to the next object in the list. This also means that we have
-       to explicitly clear the first word of every object we take. */
-    if (!*valueAllocCache) {
-        *valueAllocCache = GC_malloc_many(sizeof(Value));
-        if (!*valueAllocCache) throw std::bad_alloc();
-    }
-
-    /* GC_NEXT is a convenience macro for accessing the first word of an object.
-       Take the first list item, advance the list to the next item, and clear the next pointer. */
-    void * p = *valueAllocCache;
-    *valueAllocCache = GC_NEXT(p);
-    GC_NEXT(p) = nullptr;
-
-    nrValues++;
-    auto v = (Value *) p;
-    return v;
-}
-
-
-Env & EvalState::allocEnv(size_t size)
-{
-    nrEnvs++;
-    nrValuesInEnvs += size;
-
-    Env * env;
-
-    if (size != 1)
-        env = (Env *) allocBytes(sizeof(Env) + size * sizeof(Value *));
-    else {
-        /* see allocValue for explanations. */
-        if (!*env1AllocCache) {
-            *env1AllocCache = GC_malloc_many(sizeof(Env) + sizeof(Value *));
-            if (!*env1AllocCache) throw std::bad_alloc();
-        }
-
-        void * p = *env1AllocCache;
-        *env1AllocCache = GC_NEXT(p);
-        GC_NEXT(p) = nullptr;
-        env = (Env *) p;
-    }
-
-    env->type = Env::Plain;
-
-    /* We assume that env->values has been cleared by the allocator; maybeThunk() and lookupVar fromWith expect this. */
-
-    return *env;
-}
-
-
 void EvalState::mkList(Value & v, size_t size)
 {
     v.mkList(size);
diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh
index 41384f044..d2efe8a47 100644
--- a/src/libexpr/eval.hh
+++ b/src/libexpr/eval.hh
@@ -350,8 +350,8 @@ public:
     void autoCallFunction(Bindings & args, Value & fun, Value & res);
 
     /* Allocation primitives. */
-    Value * allocValue();
-    Env & allocEnv(size_t size);
+    inline Value * allocValue();
+    inline Env & allocEnv(size_t size);
 
     Value * allocAttr(Value & vAttrs, const Symbol & name);
     Value * allocAttr(Value & vAttrs, std::string_view name);
@@ -512,3 +512,5 @@ extern EvalSettings evalSettings;
 static const std::string corepkgsPrefix{"/__corepkgs__/"};
 
 }
+
+#include "eval-inline.hh"
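The batch-allocation technique that allocValue() and allocEnv() rely on can be read in isolation. Below is a minimal, self-contained sketch of the same GC_malloc_many/GC_NEXT free-list pattern, assuming a Boehm GC build; the struct name Obj, the cache variable objAllocCache, and the header path <gc/gc.h> are illustrative choices, not names from the patch.

#include <gc/gc.h>   // Boehm GC: GC_INIT, GC_malloc_many, GC_NEXT
#include <new>       // std::bad_alloc

struct Obj { void * a; void * b; };        // stand-in for Value/Env

static void * objAllocCache = nullptr;     // head of the cached free list

Obj * allocObj()
{
    /* Refill the cache with a whole batch when it runs dry. GC_malloc_many
       returns a linked list of equally sized objects whose first word is the
       link to the next object in the list. */
    if (!objAllocCache) {
        objAllocCache = GC_malloc_many(sizeof(Obj));
        if (!objAllocCache) throw std::bad_alloc();
    }

    /* Pop the head: advance the cache to the next list item and clear the
       link word so the returned object is fully zeroed. */
    void * p = objAllocCache;
    objAllocCache = GC_NEXT(p);
    GC_NEXT(p) = nullptr;
    return static_cast<Obj *>(p);
}

int main()
{
    GC_INIT();
    Obj * o = allocObj();   // first call pays for a whole batch, later calls just pop the list
    (void) o;
    return 0;
}

The point of the pattern is that only the first allocation in each batch takes the allocator lock; subsequent calls are a pointer pop plus one store.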
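The eval.hh hunks show the companion layout that makes forced inlining possible across translation units: the hot members are declared inline in the class, their definitions carry [[gnu::always_inline]] in eval-inline.hh, and that header is included at the bottom of eval.hh so every caller sees the definitions. A minimal single-file sketch of the same layout follows; the names State, allocThing, and state-inline.hh are hypothetical stand-ins, not the Nix headers.

// state.hh (hypothetical): declare the hot member `inline` so the
// out-of-line definition in the companion header does not violate the ODR.
#include <cstdlib>
#include <new>

struct State
{
    unsigned long nrThings = 0;
    inline void * allocThing(std::size_t n);   // defined in "state-inline.hh"
};

// state-inline.hh (hypothetical): included at the end of state.hh, the way
// eval.hh now ends with `#include "eval-inline.hh"`, so the attribute can
// force inlining at every call site.
[[gnu::always_inline]]
void * State::allocThing(std::size_t n)
{
    nrThings++;
    void * p = std::calloc(n, 1);              // zeroed, like allocBytes without Boehm GC
    if (!p) throw std::bad_alloc();
    return p;
}

int main()
{
    State s;
    void * p = s.allocThing(64);
    std::free(p);
    return s.nrThings == 1 ? 0 : 1;
}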