edata_cache: Remember the associated base_t.

This will save us some trouble down the line when we stop passing arena pointers
everywhere; we won't have to pass around a base_t pointer either.
Author: David Goldblatt
Date:   2019-12-12 18:28:37 -08:00
Parent: ae23e5f426
Commit: c792f3e4ab

5 changed files with 20 additions and 16 deletions
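In short: the base_t used for fallback allocation is now bound once at
edata_cache_init() time and remembered in the struct, so edata_cache_get()
callers no longer thread a base pointer through every call. A minimal
before/after view of the public signatures, restated from the header diff
below:

	/* Before: every get() had to supply the base allocator. */
	bool edata_cache_init(edata_cache_t *edata_cache);
	edata_t *edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache,
	    base_t *base);

	/* After: the base_t is captured at init; get() needs no base. */
	bool edata_cache_init(edata_cache_t *edata_cache, base_t *base);
	edata_t *edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache);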

@@ -12,12 +12,13 @@ struct edata_cache_s {
 	edata_tree_t avail;
 	atomic_zu_t count;
 	malloc_mutex_t mtx;
+	base_t *base;
 };
 
-bool edata_cache_init(edata_cache_t *edata_cache);
-edata_t *edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache,
-    base_t *base);
+bool edata_cache_init(edata_cache_t *edata_cache, base_t *base);
+edata_t *edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache);
 void edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata);
 
 void edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache);
 void edata_cache_postfork_parent(tsdn_t *tsdn, edata_cache_t *edata_cache);
 void edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache);

@@ -2052,7 +2052,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 		goto label_error;
 	}
 
-	if (edata_cache_init(&arena->edata_cache)) {
+	if (edata_cache_init(&arena->edata_cache, base)) {
 		goto label_error;
 	}

@@ -2,23 +2,29 @@
 #include "jemalloc/internal/jemalloc_internal_includes.h"
 
 bool
-edata_cache_init(edata_cache_t *edata_cache) {
+edata_cache_init(edata_cache_t *edata_cache, base_t *base) {
+	edata_avail_new(&edata_cache->avail);
+	/*
+	 * This is not strictly necessary, since the edata_cache_t is only
+	 * created inside an arena, which is zeroed on creation.  But this is
+	 * handy as a safety measure.
+	 */
+	atomic_store_zu(&edata_cache->count, 0, ATOMIC_RELAXED);
 	if (malloc_mutex_init(&edata_cache->mtx, "edata_cache",
 	    WITNESS_RANK_EDATA_CACHE, malloc_mutex_rank_exclusive)) {
 		return true;
 	}
-	edata_avail_new(&edata_cache->avail);
+	edata_cache->base = base;
 	return false;
 }
 
 edata_t *
-edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache, base_t *base) {
+edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache) {
 	malloc_mutex_lock(tsdn, &edata_cache->mtx);
 	edata_t *edata = edata_avail_first(&edata_cache->avail);
 	if (edata == NULL) {
 		malloc_mutex_unlock(tsdn, &edata_cache->mtx);
-		return base_alloc_edata(tsdn, base);
+		return base_alloc_edata(tsdn, edata_cache->base);
 	}
 	edata_avail_remove(&edata_cache->avail, edata);
 	atomic_fetch_sub_zu(&edata_cache->count, 1, ATOMIC_RELAXED);

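For readers skimming the hunks, the resulting edata_cache_get() reads as a
straight listing like the following (reassembled from the diff above; the
hunk is truncated after the atomic decrement, so the final unlock and return
shown here are an assumption about the elided tail):

	edata_t *
	edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache) {
		malloc_mutex_lock(tsdn, &edata_cache->mtx);
		edata_t *edata = edata_avail_first(&edata_cache->avail);
		if (edata == NULL) {
			/*
			 * Cache empty: drop the lock and allocate fresh from
			 * the base_t remembered at init time.
			 */
			malloc_mutex_unlock(tsdn, &edata_cache->mtx);
			return base_alloc_edata(tsdn, edata_cache->base);
		}
		edata_avail_remove(&edata_cache->avail, edata);
		atomic_fetch_sub_zu(&edata_cache->count, 1, ATOMIC_RELAXED);
		/* Assumed tail (elided in the hunk): */
		malloc_mutex_unlock(tsdn, &edata_cache->mtx);
		return edata;
	}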

@@ -869,8 +869,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 		alloc_size = sz_pind2sz(arena->ecache_grow.next + egn_skip);
 	}
 
-	edata_t *edata = edata_cache_get(tsdn, &arena->edata_cache,
-	    arena->base);
+	edata_t *edata = edata_cache_get(tsdn, &arena->edata_cache);
 	if (edata == NULL) {
 		goto label_err;
 	}
@@ -1036,8 +1035,7 @@ extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	    WITNESS_RANK_CORE, 0);
 
 	size_t esize = size + pad;
-	edata_t *edata = edata_cache_get(tsdn, &arena->edata_cache,
-	    arena->base);
+	edata_t *edata = edata_cache_get(tsdn, &arena->edata_cache);
 	if (edata == NULL) {
 		return NULL;
 	}
@@ -1430,8 +1428,7 @@ extent_split_impl(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 		return NULL;
 	}
 
-	edata_t *trail = edata_cache_get(tsdn, &arena->edata_cache,
-	    arena->base);
+	edata_t *trail = edata_cache_get(tsdn, &arena->edata_cache);
 	if (trail == NULL) {
 		goto label_error_a;
 	}

@@ -123,7 +123,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 		return NULL;
 	}
 
-	gap = edata_cache_get(tsdn, &arena->edata_cache, arena->base);
+	gap = edata_cache_get(tsdn, &arena->edata_cache);
 	if (gap == NULL) {
 		return NULL;
 	}