Ecache: Should know its arena_ind.

What we call an arena_ind is really the index associated with some particular
set of ehooks; the arena is just the user-visible portion of that.  Making this
explicit, and reframing checks in terms of that, makes the code simpler and
cleaner, and helps us avoid passing the arena itself all throughout extent code.

This lets us put back an arena-specific assert.
commit 576d7047ab (parent 372042a082)
Author: David Goldblatt
Date:   2019-12-13 11:33:03 -08:00
Committed by: David Goldblatt

4 changed files with 17 additions and 5 deletions
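
The diffs below thread the index from arena_new() into ecache_init(), store it on the ecache, and expose it via ecache_ind_get(); that is what lets the arena-specific assert return to extent_can_coalesce() without an arena pointer in scope. As a rough standalone illustration of the invariant being asserted (the struct layouts and the ecache_assert_owns() helper are simplified stand-ins for this sketch, not jemalloc's actual definitions):

/*
 * Standalone sketch, not jemalloc source: minimal stand-ins that model the
 * invariant this commit relies on.  Each extent records the index of the
 * arena/ehooks it belongs to, and each ecache records the index it was
 * created for, so extent code can sanity-check membership without ever
 * touching the arena itself.
 */
#include <assert.h>

typedef struct edata_s  { unsigned arena_ind; } edata_t;  /* simplified stand-in */
typedef struct ecache_s { unsigned ind; } ecache_t;       /* simplified stand-in */

static unsigned
edata_arena_ind_get(const edata_t *edata) {
	return edata->arena_ind;
}

static unsigned
ecache_ind_get(ecache_t *ecache) {
	return ecache->ind;
}

/* The shape of the arena-specific assert the commit message mentions. */
static void
ecache_assert_owns(ecache_t *ecache, const edata_t *edata) {
	assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache));
}

int
main(void) {
	ecache_t dirty = { .ind = 3 };        /* index 3: hypothetical arena/ehooks */
	edata_t extent = { .arena_ind = 3 };
	ecache_assert_owns(&dirty, &extent);
	return 0;
}

The real code keeps the same shape: the assert added to extent_can_coalesce() compares an extent's recorded arena index against the ecache it is being coalesced into.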


@@ -10,6 +10,8 @@ struct ecache_s {
 	eset_t eset;
 	/* All stored extents must be in the same state. */
 	extent_state_t state;
+	/* The index of the ehooks the ecache is associated with. */
+	unsigned ind;
 	/*
 	 * If true, delay coalescing until eviction; otherwise coalesce during
 	 * deallocation.
@@ -52,8 +54,13 @@ ecache_nbytes_get(ecache_t *ecache, pszind_t ind) {
 	return eset_nbytes_get(&ecache->eset, ind);
 }
 
+static inline unsigned
+ecache_ind_get(ecache_t *ecache) {
+	return ecache->ind;
+}
+
 bool ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state,
-    bool delay_coalesce);
+    unsigned ind, bool delay_coalesce);
 void ecache_prefork(tsdn_t *tsdn, ecache_t *ecache);
 void ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache);
 void ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache);


@@ -2018,14 +2018,16 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	 * are likely to be reused soon after deallocation, and the cost of
 	 * merging/splitting extents is non-trivial.
 	 */
-	if (ecache_init(tsdn, &arena->ecache_dirty, extent_state_dirty, true)) {
+	if (ecache_init(tsdn, &arena->ecache_dirty, extent_state_dirty, ind,
+	    true)) {
 		goto label_error;
 	}
 	/*
 	 * Coalesce muzzy extents immediately, because operations on them are in
 	 * the critical path much less often than for dirty extents.
 	 */
-	if (ecache_init(tsdn, &arena->ecache_muzzy, extent_state_muzzy, false)) {
+	if (ecache_init(tsdn, &arena->ecache_muzzy, extent_state_muzzy, ind,
+	    false)) {
 		goto label_error;
 	}
 	/*
@@ -2035,7 +2037,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	 * in the critical path.
 	 */
 	if (ecache_init(tsdn, &arena->ecache_retained, extent_state_retained,
-	    false)) {
+	    ind, false)) {
 		goto label_error;
 	}
 

@@ -2,13 +2,14 @@
 #include "jemalloc/internal/jemalloc_internal_includes.h"
 
 bool
-ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state,
+ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state, unsigned ind,
     bool delay_coalesce) {
 	if (malloc_mutex_init(&ecache->mtx, "extents", WITNESS_RANK_EXTENTS,
 	    malloc_mutex_rank_exclusive)) {
 		return true;
 	}
 	ecache->state = state;
+	ecache->ind = ind;
 	ecache->delay_coalesce = delay_coalesce;
 	eset_init(&ecache->eset, state);
 	return false;


@@ -1062,6 +1062,8 @@ extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 static bool
 extent_can_coalesce(ecache_t *ecache, const edata_t *inner,
     const edata_t *outer) {
+	assert(edata_arena_ind_get(inner) == ecache_ind_get(ecache));
+
 	if (edata_arena_ind_get(inner) != edata_arena_ind_get(outer)) {
 		return false;
 	}