PA: Parameterize emap. Move emap_global to arena.

This lets us test the PA module without interfering with the global emap used by
the real allocator (the one not under test).
David Goldblatt, 2020-03-14 10:49:34 -07:00 (committed by David Goldblatt)
parent f730577277
commit 294b276fc7
19 changed files with 211 additions and 184 deletions
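
What the new seam makes possible, in sketch form: because pa_shard_init() now takes the emap as an argument (see the pa.h/pa.c hunks below) and emap_init() is public (emap.h), a PA test can wire a shard to a private emap and leave arena_emap_global untouched. The helper below is hypothetical test scaffolding, not part of this commit; only the emap_init() and pa_shard_init() signatures are taken from the diff, and the decay-default constants are assumptions.

static emap_t test_emap;
static pa_shard_t test_shard;

static bool
test_pa_shard_bootstrap(tsdn_t *tsdn, base_t *base, unsigned ind,
    pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx) {
	/* The file-static emap is zero-initialized, hence zeroed = true. */
	if (emap_init(&test_emap, base, /* zeroed */ true)) {
		return true;
	}
	nstime_t cur_time;
	nstime_init_update(&cur_time);
	/*
	 * pa_shard_init() still asserts base_ind_get(base) == ind, so the
	 * caller must pass a matching base/ind pair.  The decay defaults
	 * here are placeholder values.
	 */
	return pa_shard_init(tsdn, &test_shard, &test_emap, base, ind, stats,
	    stats_mtx, &cur_time, DIRTY_DECAY_MS_DEFAULT,
	    MUZZY_DECAY_MS_DEFAULT);
}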


@@ -15,6 +15,7 @@ extern const char *percpu_arena_mode_names[];
 extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];
 extern malloc_mutex_t arenas_lock;
+extern emap_t arena_emap_global;
 
 extern size_t opt_oversize_threshold;
 extern size_t oversize_threshold;


@@ -48,10 +48,12 @@ arena_prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
 	/* Static check. */
 	if (alloc_ctx == NULL) {
-		edata = emap_edata_lookup(tsd_tsdn(tsd), &emap_global, ptr);
+		edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
+		    ptr);
 		is_slab = edata_slab_get(edata);
 	} else if (unlikely(!(is_slab = alloc_ctx->slab))) {
-		edata = emap_edata_lookup(tsd_tsdn(tsd), &emap_global, ptr);
+		edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
+		    ptr);
 	}
 
 	if (unlikely(!is_slab)) {
@@ -75,15 +77,15 @@ arena_prof_tctx_reset(tsd_t *tsd, const void *ptr,
 	/* Static check. */
 	if (alloc_ctx == NULL) {
-		edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &emap_global,
-		    ptr);
+		edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
+		    &arena_emap_global, ptr);
 		if (unlikely(!edata_slab_get(edata))) {
 			large_prof_tctx_reset(edata);
 		}
 	} else {
 		if (unlikely(!alloc_ctx->slab)) {
 			edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
-			    &emap_global, ptr);
+			    &arena_emap_global, ptr);
 			large_prof_tctx_reset(edata);
 		}
 	}
@@ -94,7 +96,8 @@ arena_prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
 	cassert(config_prof);
 	assert(ptr != NULL);
 
-	edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &emap_global, ptr);
+	edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
+	    ptr);
 	assert(!edata_slab_get(edata));
 
 	large_prof_tctx_reset(edata);
@@ -157,7 +160,7 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
 
 JEMALLOC_ALWAYS_INLINE arena_t *
 arena_aalloc(tsdn_t *tsdn, const void *ptr) {
-	edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
+	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
 	unsigned arena_ind = edata_arena_ind_get(edata);
 	return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_RELAXED);
 }
@@ -166,7 +169,7 @@ JEMALLOC_ALWAYS_INLINE size_t
 arena_salloc(tsdn_t *tsdn, const void *ptr) {
 	assert(ptr != NULL);
 
 	emap_alloc_ctx_t alloc_ctx;
-	emap_alloc_ctx_lookup(tsdn, &emap_global, ptr, &alloc_ctx);
+	emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
 	assert(alloc_ctx.szind != SC_NSIZES);
 
 	return sz_index2size(alloc_ctx.szind);
@@ -184,8 +187,8 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
 	 */
 	emap_full_alloc_ctx_t full_alloc_ctx;
-	bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &emap_global, ptr,
-	    &full_alloc_ctx);
+	bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &arena_emap_global,
+	    ptr, &full_alloc_ctx);
 	if (missing) {
 		return 0;
 	}
@@ -208,7 +211,8 @@ arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
 	if (config_prof && unlikely(szind < SC_NBINS)) {
 		arena_dalloc_promoted(tsdn, ptr, NULL, true);
 	} else {
-		edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
+		edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
+		    ptr);
 		large_dalloc(tsdn, edata);
 	}
 }
@@ -218,10 +222,11 @@ arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
 	assert(ptr != NULL);
 
 	emap_alloc_ctx_t alloc_ctx;
-	emap_alloc_ctx_lookup(tsdn, &emap_global, ptr, &alloc_ctx);
+	emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
 
 	if (config_debug) {
-		edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
+		edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
+		    ptr);
 		assert(alloc_ctx.szind == edata_szind_get(edata));
 		assert(alloc_ctx.szind < SC_NSIZES);
 		assert(alloc_ctx.slab == edata_slab_get(edata));
@@ -246,7 +251,8 @@ arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
 			    slow_path);
 		}
 	} else {
-		edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
+		edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
+		    ptr);
 		large_dalloc(tsdn, edata);
 	}
 }
@@ -267,11 +273,13 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
 		alloc_ctx = *caller_alloc_ctx;
 	} else {
 		util_assume(!tsdn_null(tsdn));
-		emap_alloc_ctx_lookup(tsdn, &emap_global, ptr, &alloc_ctx);
+		emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
+		    &alloc_ctx);
 	}
 
 	if (config_debug) {
-		edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
+		edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
+		    ptr);
 		assert(alloc_ctx.szind == edata_szind_get(edata));
 		assert(alloc_ctx.szind < SC_NSIZES);
 		assert(alloc_ctx.slab == edata_slab_get(edata));
@@ -303,15 +311,16 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
 	}
 
 	if ((config_prof && opt_prof) || config_debug) {
-		emap_alloc_ctx_lookup(tsdn, &emap_global, ptr, &alloc_ctx);
+		emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
+		    &alloc_ctx);
 
 		assert(alloc_ctx.szind == sz_size2index(size));
 		assert((config_prof && opt_prof)
 		    || alloc_ctx.slab == (alloc_ctx.szind < SC_NBINS));
 
 		if (config_debug) {
-			edata_t *edata = emap_edata_lookup(tsdn, &emap_global,
-			    ptr);
+			edata_t *edata = emap_edata_lookup(tsdn,
+			    &arena_emap_global, ptr);
 			assert(alloc_ctx.szind == edata_szind_get(edata));
 			assert(alloc_ctx.slab == edata_slab_get(edata));
 		}
@@ -341,7 +350,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
 	if (config_prof && opt_prof) {
 		if (caller_alloc_ctx == NULL) {
 			/* Uncommon case and should be a static check. */
-			emap_alloc_ctx_lookup(tsdn, &emap_global, ptr,
+			emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
 			    &alloc_ctx);
 			assert(alloc_ctx.szind == sz_size2index(size));
 		} else {
@@ -357,7 +366,8 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
 	}
 
 	if (config_debug) {
-		edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
+		edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
+		    ptr);
 		assert(alloc_ctx.szind == edata_szind_get(edata));
 		assert(alloc_ctx.slab == edata_slab_get(edata));
 	}


@@ -26,8 +26,6 @@ struct emap_full_alloc_ctx_s {
 	edata_t *edata;
 };
 
-extern emap_t emap_global;
-
 bool emap_init(emap_t *emap, base_t *base, bool zeroed);
 
 /*


@@ -47,10 +47,10 @@ bool extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
     size_t offset, size_t length);
 bool extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
     size_t offset, size_t length);
-edata_t *extent_split_wrapper(tsdn_t *tsdn, edata_cache_t *edata_cache,
-    ehooks_t *ehooks, edata_t *edata, size_t size_a, szind_t szind_a,
-    bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
-bool extent_merge_wrapper(tsdn_t *tsdn, ehooks_t *ehooks,
+edata_t *extent_split_wrapper(tsdn_t *tsdn, pa_shard_t *shard,
+    edata_cache_t *edata_cache, ehooks_t *ehooks, edata_t *edata, size_t size_a,
+    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
+bool extent_merge_wrapper(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
     edata_cache_t *edata_cache, edata_t *a, edata_t *b);
 
 bool extent_boot(void);


@@ -5,6 +5,7 @@
 #include "jemalloc/internal/decay.h"
 #include "jemalloc/internal/ecache.h"
 #include "jemalloc/internal/edata_cache.h"
+#include "jemalloc/internal/emap.h"
 #include "jemalloc/internal/lockedint.h"
 
 enum pa_decay_purge_setting_e {
@@ -140,6 +141,9 @@ struct pa_shard_s {
 	decay_t decay_dirty; /* dirty --> muzzy */
 	decay_t decay_muzzy; /* muzzy --> retained */
 
+	/* The emap this shard is tied to. */
+	emap_t *emap;
+
 	/* The base from which we get the ehooks and allocate metadata. */
 	base_t *base;
 };
@@ -171,9 +175,10 @@ pa_shard_ehooks_get(pa_shard_t *shard) {
 }
 
 /* Returns true on error. */
-bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
-    pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx, nstime_t *cur_time,
-    ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms);
+bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
+    unsigned ind, pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx,
+    nstime_t *cur_time, ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms);
 
 /*
  * This does the PA-specific parts of arena reset (i.e. freeing all active
  * allocations).


@@ -37,6 +37,8 @@ ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
 static atomic_zd_t dirty_decay_ms_default;
 static atomic_zd_t muzzy_decay_ms_default;
 
+emap_t arena_emap_global;
+
 const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
 #define STEP(step, h, x, y)	\
 		h,
@@ -668,7 +670,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
 		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
 
 		emap_alloc_ctx_t alloc_ctx;
-		emap_alloc_ctx_lookup(tsd_tsdn(tsd), &emap_global, ptr,
+		emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
 		    &alloc_ctx);
 		assert(alloc_ctx.szind != SC_NSIZES);
@@ -1064,11 +1066,11 @@ arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
 		safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS);
 	}
 
-	edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
+	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
 
 	szind_t szind = sz_size2index(usize);
 	edata_szind_set(edata, szind);
-	emap_remap(tsdn, &emap_global, edata, szind, /* slab */ false);
+	emap_remap(tsdn, &arena_emap_global, edata, szind, /* slab */ false);
 
 	prof_idump_rollback(tsdn, usize);
@@ -1081,7 +1083,7 @@ arena_prof_demote(tsdn_t *tsdn, edata_t *edata, const void *ptr) {
 	assert(ptr != NULL);
 
 	edata_szind_set(edata, SC_NBINS);
-	emap_remap(tsdn, &emap_global, edata, SC_NBINS, /* slab */ false);
+	emap_remap(tsdn, &arena_emap_global, edata, SC_NBINS, /* slab */ false);
 
 	assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
@@ -1094,7 +1096,7 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
 	cassert(config_prof);
 	assert(opt_prof);
 
-	edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
+	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
 	size_t usize = edata_usize_get(edata);
 	size_t bumped_usize = arena_prof_demote(tsdn, edata, ptr);
 	if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) {
@@ -1223,7 +1225,7 @@ arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, edata_t *edata, void *ptr) {
 
 void
 arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
-	edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
+	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
 	arena_t *arena = arena_get_from_edata(edata);
 
 	arena_dalloc_bin(tsdn, arena, edata, ptr);
@@ -1237,7 +1239,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
 	/* Calls with non-zero extra had to clamp extra. */
 	assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);
 
-	edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
+	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
 	if (unlikely(size > SC_LARGE_MAXCLASS)) {
 		ret = true;
 		goto done;
@@ -1271,7 +1273,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
 		ret = true;
 	}
 done:
-	assert(edata == emap_edata_lookup(tsdn, &emap_global, ptr));
+	assert(edata == emap_edata_lookup(tsdn, &arena_emap_global, ptr));
 	*newsize = edata_usize_get(edata);
 
 	return ret;
@@ -1491,7 +1493,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 
 	nstime_t cur_time;
 	nstime_init_update(&cur_time);
-	if (pa_shard_init(tsdn, &arena->pa_shard, base, ind,
+	if (pa_shard_init(tsdn, &arena->pa_shard, &arena_emap_global, base, ind,
 	    &arena->stats.pa_shard_stats, LOCKEDINT_MTX(arena->stats.mtx),
 	    &cur_time, arena_dirty_decay_ms_default_get(),
 	    arena_muzzy_decay_ms_default_get())) {


@@ -2650,7 +2650,7 @@ arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
 	ret = EINVAL;
 	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
 	WRITE(ptr, void *);
-	edata = emap_edata_lookup(tsd_tsdn(tsd), &emap_global, ptr);
+	edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr);
 	if (edata == NULL)
 		goto label_return;


@@ -189,8 +189,8 @@ ehooks_default_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
 
 static inline bool
 ehooks_same_sn(tsdn_t *tsdn, void *addr_a, void *addr_b) {
-	edata_t *a = emap_edata_lookup(tsdn, &emap_global, addr_a);
-	edata_t *b = emap_edata_lookup(tsdn, &emap_global, addr_b);
+	edata_t *a = emap_edata_lookup(tsdn, &arena_emap_global, addr_a);
+	edata_t *b = emap_edata_lookup(tsdn, &arena_emap_global, addr_b);
 	return edata_sn_comp(a, b) == 0;
 }
@@ -253,9 +253,9 @@ bool
 ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
     void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
 	tsdn_t *tsdn = tsdn_fetch();
-	edata_t *a = emap_edata_lookup(tsdn, &emap_global, addr_a);
+	edata_t *a = emap_edata_lookup(tsdn, &arena_emap_global, addr_a);
 	bool head_a = edata_is_head_get(a);
-	edata_t *b = emap_edata_lookup(tsdn, &emap_global, addr_b);
+	edata_t *b = emap_edata_lookup(tsdn, &arena_emap_global, addr_b);
 	bool head_b = edata_is_head_get(b);
 	return ehooks_default_merge_impl(tsdn, addr_a, head_a, addr_b, head_b);
 }


@@ -3,8 +3,6 @@
 
 #include "jemalloc/internal/emap.h"
 
-emap_t emap_global;
-
 /*
  * Note: Ends without a semicolon, so that
  *	EMAP_DECLARE_RTREE_CTX;


@@ -19,11 +19,11 @@ static bool extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks,
     edata_t *edata, size_t offset, size_t length, bool growing_retained);
 static bool extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks,
     edata_t *edata, size_t offset, size_t length, bool growing_retained);
-static edata_t *extent_split_impl(tsdn_t *tsdn, edata_cache_t *edata_cache,
-    ehooks_t *ehooks, edata_t *edata, size_t size_a, szind_t szind_a,
-    bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
+static edata_t *extent_split_impl(tsdn_t *tsdn, pa_shard_t *shard,
+    edata_cache_t *edata_cache, ehooks_t *ehooks, edata_t *edata, size_t size_a,
+    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
     bool growing_retained);
-static bool extent_merge_impl(tsdn_t *tsdn, ehooks_t *ehooks,
+static bool extent_merge_impl(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
     edata_cache_t *edata_cache, edata_t *a, edata_t *b, bool growing_retained);
 
 /* Used exclusively for gdump triggering. */
@@ -36,14 +36,14 @@ static atomic_zu_t highpages;
  * definition.
  */
 
-static void extent_deregister(tsdn_t *tsdn, edata_t *edata);
+static void extent_deregister(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata);
 static edata_t *extent_recycle(tsdn_t *tsdn, pa_shard_t *shard,
     ehooks_t *ehooks, ecache_t *ecache, void *new_addr, size_t usize,
     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
     bool growing_retained);
-static edata_t *extent_try_coalesce(tsdn_t *tsdn, edata_cache_t *edata_cache,
-    ehooks_t *ehooks, ecache_t *ecache, edata_t *edata, bool *coalesced,
-    bool growing_retained);
+static edata_t *extent_try_coalesce(tsdn_t *tsdn, pa_shard_t *shard,
+    edata_cache_t *edata_cache, ehooks_t *ehooks, ecache_t *ecache,
+    edata_t *edata, bool *coalesced, bool growing_retained);
 static void extent_record(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *edata, bool growing_retained);
 static edata_t *extent_alloc_retained(tsdn_t *tsdn, pa_shard_t *shard,
@@ -53,12 +53,13 @@ static edata_t *extent_alloc_retained(tsdn_t *tsdn, pa_shard_t *shard,
 
 /******************************************************************************/
 
 static bool
-extent_try_delayed_coalesce(tsdn_t *tsdn, edata_cache_t *edata_cache,
-    ehooks_t *ehooks, ecache_t *ecache, edata_t *edata) {
+extent_try_delayed_coalesce(tsdn_t *tsdn, pa_shard_t *shard,
+    edata_cache_t *edata_cache, ehooks_t *ehooks, ecache_t *ecache,
+    edata_t *edata) {
 	edata_state_set(edata, extent_state_active);
 	bool coalesced;
-	edata = extent_try_coalesce(tsdn, edata_cache, ehooks, ecache, edata,
-	    &coalesced, false);
+	edata = extent_try_coalesce(tsdn, shard, edata_cache, ehooks, ecache,
+	    edata, &coalesced, false);
 	edata_state_set(edata, ecache->state);
 
 	if (!coalesced) {
@@ -156,8 +157,8 @@ ecache_evict(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 			break;
 		}
 		/* Try to coalesce. */
-		if (extent_try_delayed_coalesce(tsdn, &shard->edata_cache,
-		    ehooks, ecache, edata)) {
+		if (extent_try_delayed_coalesce(tsdn, shard,
+		    &shard->edata_cache, ehooks, ecache, edata)) {
 			break;
 		}
 		/*
@@ -178,7 +179,7 @@ ecache_evict(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 		edata_state_set(edata, extent_state_active);
 		break;
 	case extent_state_retained:
-		extent_deregister(tsdn, edata);
+		extent_deregister(tsdn, shard, edata);
 		break;
 	default:
 		not_reached();
@@ -278,26 +279,27 @@ extent_gdump_sub(tsdn_t *tsdn, const edata_t *edata) {
 }
 
 static bool
-extent_register_impl(tsdn_t *tsdn, edata_t *edata, bool gdump_add) {
+extent_register_impl(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
+    bool gdump_add) {
 	/*
 	 * We need to hold the lock to protect against a concurrent coalesce
 	 * operation that sees us in a partial state.
 	 */
-	emap_lock_edata(tsdn, &emap_global, edata);
+	emap_lock_edata(tsdn, shard->emap, edata);
 
 	szind_t szind = edata_szind_get_maybe_invalid(edata);
 	bool slab = edata_slab_get(edata);
-	if (emap_register_boundary(tsdn, &emap_global, edata, szind, slab)) {
-		emap_unlock_edata(tsdn, &emap_global, edata);
+	if (emap_register_boundary(tsdn, shard->emap, edata, szind, slab)) {
+		emap_unlock_edata(tsdn, shard->emap, edata);
 		return true;
 	}
 
 	if (slab) {
-		emap_register_interior(tsdn, &emap_global, edata, szind);
+		emap_register_interior(tsdn, shard->emap, edata, szind);
 	}
-	emap_unlock_edata(tsdn, &emap_global, edata);
+	emap_unlock_edata(tsdn, shard->emap, edata);
 
 	if (config_prof && gdump_add) {
 		extent_gdump_add(tsdn, edata);
@@ -307,18 +309,18 @@ extent_register_impl(tsdn_t *tsdn, edata_t *edata, bool gdump_add) {
 }
 
 static bool
-extent_register(tsdn_t *tsdn, edata_t *edata) {
-	return extent_register_impl(tsdn, edata, true);
+extent_register(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata) {
+	return extent_register_impl(tsdn, shard, edata, true);
 }
 
 static bool
-extent_register_no_gdump_add(tsdn_t *tsdn, edata_t *edata) {
-	return extent_register_impl(tsdn, edata, false);
+extent_register_no_gdump_add(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata) {
+	return extent_register_impl(tsdn, shard, edata, false);
 }
 
 static void
-extent_reregister(tsdn_t *tsdn, edata_t *edata) {
-	bool err = extent_register(tsdn, edata);
+extent_reregister(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata) {
+	bool err = extent_register(tsdn, shard, edata);
 	assert(!err);
 }
@@ -326,14 +328,15 @@ extent_reregister(tsdn_t *tsdn, edata_t *edata) {
  * Removes all pointers to the given extent from the global rtree.
  */
 static void
-extent_deregister_impl(tsdn_t *tsdn, edata_t *edata, bool gdump) {
-	emap_lock_edata(tsdn, &emap_global, edata);
-	emap_deregister_boundary(tsdn, &emap_global, edata);
+extent_deregister_impl(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
+    bool gdump) {
+	emap_lock_edata(tsdn, shard->emap, edata);
+	emap_deregister_boundary(tsdn, shard->emap, edata);
 	if (edata_slab_get(edata)) {
-		emap_deregister_interior(tsdn, &emap_global, edata);
+		emap_deregister_interior(tsdn, shard->emap, edata);
 		edata_slab_set(edata, false);
 	}
-	emap_unlock_edata(tsdn, &emap_global, edata);
+	emap_unlock_edata(tsdn, shard->emap, edata);
 
 	if (config_prof && gdump) {
 		extent_gdump_sub(tsdn, edata);
@@ -341,13 +344,14 @@ extent_deregister_impl(tsdn_t *tsdn, edata_t *edata, bool gdump) {
 }
 
 static void
-extent_deregister(tsdn_t *tsdn, edata_t *edata) {
-	extent_deregister_impl(tsdn, edata, true);
+extent_deregister(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata) {
+	extent_deregister_impl(tsdn, shard, edata, true);
 }
 
 static void
-extent_deregister_no_gdump_sub(tsdn_t *tsdn, edata_t *edata) {
-	extent_deregister_impl(tsdn, edata, false);
+extent_deregister_no_gdump_sub(tsdn_t *tsdn, pa_shard_t *shard,
+    edata_t *edata) {
+	extent_deregister_impl(tsdn, shard, edata, false);
 }
 
 /*
@@ -380,7 +384,7 @@ extent_recycle_extract(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 	malloc_mutex_lock(tsdn, &ecache->mtx);
 	edata_t *edata;
 	if (new_addr != NULL) {
-		edata = emap_lock_edata_from_addr(tsdn, &emap_global, new_addr,
+		edata = emap_lock_edata_from_addr(tsdn, shard->emap, new_addr,
 		    false);
 		if (edata != NULL) {
 			/*
@@ -395,7 +399,7 @@ extent_recycle_extract(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 			    != ecache->state) {
 				edata = NULL;
 			}
-			emap_unlock_edata(tsdn, &emap_global, unlock_edata);
+			emap_unlock_edata(tsdn, shard->emap, unlock_edata);
 		}
 	} else {
 		/*
@@ -478,9 +482,9 @@ extent_split_interior(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 	/* Split the lead. */
 	if (leadsize != 0) {
 		*lead = *edata;
-		*edata = extent_split_impl(tsdn, &shard->edata_cache, ehooks,
-		    *lead, leadsize, SC_NSIZES, false, size + trailsize, szind,
-		    slab, growing_retained);
+		*edata = extent_split_impl(tsdn, shard, &shard->edata_cache,
+		    ehooks, *lead, leadsize, SC_NSIZES, false, size + trailsize,
+		    szind, slab, growing_retained);
 		if (*edata == NULL) {
 			*to_leak = *lead;
 			*lead = NULL;
@@ -490,9 +494,9 @@ extent_split_interior(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 
 	/* Split the trail. */
 	if (trailsize != 0) {
-		*trail = extent_split_impl(tsdn, &shard->edata_cache, ehooks,
-		    *edata, size, szind, slab, trailsize, SC_NSIZES, false,
-		    growing_retained);
+		*trail = extent_split_impl(tsdn, shard, &shard->edata_cache,
+		    ehooks, *edata, size, szind, slab, trailsize, SC_NSIZES,
+		    false, growing_retained);
 		if (*trail == NULL) {
 			*to_leak = *edata;
 			*to_salvage = *lead;
@@ -504,7 +508,7 @@ extent_split_interior(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 
 	if (leadsize == 0 && trailsize == 0) {
 		edata_szind_set(*edata, szind);
-		emap_remap(tsdn, &emap_global, *edata, szind, slab);
+		emap_remap(tsdn, shard->emap, *edata, szind, slab);
 	}
 
 	return extent_split_interior_ok;
@@ -555,14 +559,14 @@ extent_recycle_split(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 		 */
 		assert(result == extent_split_interior_error);
 		if (to_salvage != NULL) {
-			extent_deregister(tsdn, to_salvage);
+			extent_deregister(tsdn, shard, to_salvage);
 		}
 		if (to_leak != NULL) {
 			void *leak = edata_base_get(to_leak);
-			extent_deregister_no_gdump_sub(tsdn, to_leak);
+			extent_deregister_no_gdump_sub(tsdn, shard, to_leak);
 			extents_abandon_vm(tsdn, shard, ehooks, ecache, to_leak,
 			    growing_retained);
-			assert(emap_lock_edata_from_addr(tsdn, &emap_global,
+			assert(emap_lock_edata_from_addr(tsdn, shard->emap,
 			    leak, false) == NULL);
 		}
 		return NULL;
@@ -614,7 +618,7 @@ extent_recycle(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 	assert(edata_state_get(edata) == extent_state_active);
 	if (slab) {
 		edata_slab_set(edata, slab);
-		emap_register_interior(tsdn, &emap_global, edata, szind);
+		emap_register_interior(tsdn, shard->emap, edata, szind);
 	}
 
 	if (*zero) {
@@ -681,7 +685,7 @@ extent_grow_retained(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 	    extent_state_active, zeroed, committed, /* ranged */ false,
 	    EXTENT_IS_HEAD);
 
-	if (extent_register_no_gdump_add(tsdn, edata)) {
+	if (extent_register_no_gdump_add(tsdn, shard, edata)) {
 		edata_cache_put(tsdn, &shard->edata_cache, edata);
 		goto label_err;
 	}
@@ -725,7 +729,7 @@ extent_grow_retained(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 			    &shard->ecache_retained, to_salvage, true);
 		}
 		if (to_leak != NULL) {
-			extent_deregister_no_gdump_sub(tsdn, to_leak);
+			extent_deregister_no_gdump_sub(tsdn, shard, to_leak);
 			extents_abandon_vm(tsdn, shard, ehooks,
 			    &shard->ecache_retained, to_leak, true);
 		}
@@ -769,7 +773,7 @@ extent_grow_retained(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 	}
 	if (slab) {
 		edata_slab_set(edata, true);
-		emap_register_interior(tsdn, &emap_global, edata, szind);
+		emap_register_interior(tsdn, shard->emap, edata, szind);
 	}
 	if (*zero && !edata_zeroed_get(edata)) {
 		void *addr = edata_base_get(edata);
@@ -834,7 +838,7 @@ extent_alloc_wrapper(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 	    size, slab, szind, pa_shard_extent_sn_next(shard),
 	    extent_state_active, *zero, *commit, /* ranged */ false,
 	    EXTENT_NOT_HEAD);
-	if (extent_register(tsdn, edata)) {
+	if (extent_register(tsdn, shard, edata)) {
 		edata_cache_put(tsdn, &shard->edata_cache, edata);
 		return NULL;
 	}
@@ -864,15 +868,15 @@ extent_can_coalesce(ecache_t *ecache, const edata_t *inner,
 }
 
 static bool
-extent_coalesce(tsdn_t *tsdn, edata_cache_t *edata_cache, ehooks_t *ehooks,
-    ecache_t *ecache, edata_t *inner, edata_t *outer, bool forward,
-    bool growing_retained) {
+extent_coalesce(tsdn_t *tsdn, pa_shard_t *shard, edata_cache_t *edata_cache,
+    ehooks_t *ehooks, ecache_t *ecache, edata_t *inner, edata_t *outer,
+    bool forward, bool growing_retained) {
 	assert(extent_can_coalesce(ecache, inner, outer));
 
 	extent_activate_locked(tsdn, ecache, outer);
 	malloc_mutex_unlock(tsdn, &ecache->mtx);
-	bool err = extent_merge_impl(tsdn, ehooks, edata_cache,
+	bool err = extent_merge_impl(tsdn, shard, ehooks, edata_cache,
 	    forward ? inner : outer, forward ? outer : inner, growing_retained);
 	malloc_mutex_lock(tsdn, &ecache->mtx);
@@ -884,9 +888,10 @@ extent_coalesce(tsdn_t *tsdn, edata_cache_t *edata_cache, ehooks_t *ehooks,
 }
 
 static edata_t *
-extent_try_coalesce_impl(tsdn_t *tsdn, edata_cache_t *edata_cache,
-    ehooks_t *ehooks, ecache_t *ecache, edata_t *edata, bool *coalesced,
-    bool growing_retained, bool inactive_only) {
+extent_try_coalesce_impl(tsdn_t *tsdn, pa_shard_t *shard,
+    edata_cache_t *edata_cache, ehooks_t *ehooks, ecache_t *ecache,
+    edata_t *edata, bool *coalesced, bool growing_retained,
+    bool inactive_only) {
 	/*
 	 * We avoid checking / locking inactive neighbors for large size
 	 * classes, since they are eagerly coalesced on deallocation which can
@@ -901,7 +906,7 @@ extent_try_coalesce_impl(tsdn_t *tsdn, edata_cache_t *edata_cache,
 		again = false;
 
 		/* Try to coalesce forward. */
-		edata_t *next = emap_lock_edata_from_addr(tsdn, &emap_global,
+		edata_t *next = emap_lock_edata_from_addr(tsdn, shard->emap,
 		    edata_past_get(edata), inactive_only);
 		if (next != NULL) {
 			/*
@@ -912,10 +917,10 @@ extent_try_coalesce_impl(tsdn_t *tsdn, edata_cache_t *edata_cache,
 			bool can_coalesce = extent_can_coalesce(ecache,
 			    edata, next);
 
-			emap_unlock_edata(tsdn, &emap_global, next);
+			emap_unlock_edata(tsdn, shard->emap, next);
 
-			if (can_coalesce && !extent_coalesce(tsdn, edata_cache,
-			    ehooks, ecache, edata, next, true,
+			if (can_coalesce && !extent_coalesce(tsdn, shard,
+			    edata_cache, ehooks, ecache, edata, next, true,
 			    growing_retained)) {
 				if (ecache->delay_coalesce) {
 					/* Do minimal coalescing. */
@@ -927,15 +932,15 @@ extent_try_coalesce_impl(tsdn_t *tsdn, edata_cache_t *edata_cache,
 		}
 
 		/* Try to coalesce backward. */
-		edata_t *prev = emap_lock_edata_from_addr(tsdn, &emap_global,
+		edata_t *prev = emap_lock_edata_from_addr(tsdn, shard->emap,
 		    edata_before_get(edata), inactive_only);
 		if (prev != NULL) {
 			bool can_coalesce = extent_can_coalesce(ecache, edata,
 			    prev);
-			emap_unlock_edata(tsdn, &emap_global, prev);
+			emap_unlock_edata(tsdn, shard->emap, prev);
 
-			if (can_coalesce && !extent_coalesce(tsdn, edata_cache,
-			    ehooks, ecache, edata, prev, false,
+			if (can_coalesce && !extent_coalesce(tsdn, shard,
+			    edata_cache, ehooks, ecache, edata, prev, false,
 			    growing_retained)) {
 				edata = prev;
 				if (ecache->delay_coalesce) {
@@ -955,18 +960,19 @@ extent_try_coalesce_impl(tsdn_t *tsdn, edata_cache_t *edata_cache,
 }
 
 static edata_t *
-extent_try_coalesce(tsdn_t *tsdn, edata_cache_t *edata_cache, ehooks_t *ehooks,
-    ecache_t *ecache, edata_t *edata, bool *coalesced, bool growing_retained) {
-	return extent_try_coalesce_impl(tsdn, edata_cache, ehooks, ecache,
-	    edata, coalesced, growing_retained, false);
+extent_try_coalesce(tsdn_t *tsdn, pa_shard_t *shard, edata_cache_t *edata_cache,
+    ehooks_t *ehooks, ecache_t *ecache, edata_t *edata, bool *coalesced,
+    bool growing_retained) {
+	return extent_try_coalesce_impl(tsdn, shard, edata_cache, ehooks,
+	    ecache, edata, coalesced, growing_retained, false);
 }
 
 static edata_t *
-extent_try_coalesce_large(tsdn_t *tsdn, edata_cache_t *edata_cache,
-    ehooks_t *ehooks, ecache_t *ecache, edata_t *edata, bool *coalesced,
-    bool growing_retained) {
-	return extent_try_coalesce_impl(tsdn, edata_cache, ehooks, ecache,
-	    edata, coalesced, growing_retained, true);
+extent_try_coalesce_large(tsdn_t *tsdn, pa_shard_t *shard,
+    edata_cache_t *edata_cache, ehooks_t *ehooks, ecache_t *ecache,
+    edata_t *edata, bool *coalesced, bool growing_retained) {
+	return extent_try_coalesce_impl(tsdn, shard, edata_cache, ehooks,
+	    ecache, edata, coalesced, growing_retained, true);
 }
 
 /* Purge a single extent to retained / unmapped directly. */
@@ -1007,22 +1013,22 @@ extent_record(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 	edata_szind_set(edata, SC_NSIZES);
 	if (edata_slab_get(edata)) {
-		emap_deregister_interior(tsdn, &emap_global, edata);
+		emap_deregister_interior(tsdn, shard->emap, edata);
 		edata_slab_set(edata, false);
 	}
 
-	emap_assert_mapped(tsdn, &emap_global, edata);
+	emap_assert_mapped(tsdn, shard->emap, edata);
 
 	if (!ecache->delay_coalesce) {
-		edata = extent_try_coalesce(tsdn, &shard->edata_cache, ehooks,
-		    ecache, edata, NULL, growing_retained);
+		edata = extent_try_coalesce(tsdn, shard, &shard->edata_cache,
+		    ehooks, ecache, edata, NULL, growing_retained);
 	} else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) {
 		assert(ecache == &shard->ecache_dirty);
 		/* Always coalesce large extents eagerly. */
 		bool coalesced;
 		do {
 			assert(edata_state_get(edata) == extent_state_active);
-			edata = extent_try_coalesce_large(tsdn,
+			edata = extent_try_coalesce_large(tsdn, shard,
 			    &shard->edata_cache, ehooks, ecache, edata,
 			    &coalesced, growing_retained);
 		} while (coalesced);
@@ -1045,7 +1051,7 @@ extent_dalloc_gap(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
 
-	if (extent_register(tsdn, edata)) {
+	if (extent_register(tsdn, shard, edata)) {
 		edata_cache_put(tsdn, &shard->edata_cache, edata);
 		return;
 	}
@@ -1088,11 +1094,11 @@ extent_dalloc_wrapper(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 		 * Deregister first to avoid a race with other allocating
 		 * threads, and reregister if deallocation fails.
 		 */
-		extent_deregister(tsdn, edata);
+		extent_deregister(tsdn, shard, edata);
 		if (!extent_dalloc_wrapper_try(tsdn, shard, ehooks, edata)) {
 			return;
 		}
-		extent_reregister(tsdn, edata);
+		extent_reregister(tsdn, shard, edata);
 	}
 
 	/* Try to decommit; purge if that fails. */
@@ -1131,7 +1137,7 @@ extent_destroy_wrapper(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 	    WITNESS_RANK_CORE, 0);
 
 	/* Deregister first to avoid a race with other allocating threads. */
-	extent_deregister(tsdn, edata);
+	extent_deregister(tsdn, shard, edata);
 
 	edata_addr_set(edata, edata_base_get(edata));
@@ -1213,9 +1219,10 @@ extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
 * and returns the trail (except in case of error).
 */
 static edata_t *
-extent_split_impl(tsdn_t *tsdn, edata_cache_t *edata_cache, ehooks_t *ehooks,
-    edata_t *edata, size_t size_a, szind_t szind_a, bool slab_a,
-    size_t size_b, szind_t szind_b, bool slab_b, bool growing_retained) {
+extent_split_impl(tsdn_t *tsdn, pa_shard_t *shard, edata_cache_t *edata_cache,
+    ehooks_t *ehooks, edata_t *edata, size_t size_a, szind_t szind_a,
+    bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
+    bool growing_retained) {
 	assert(edata_size_get(edata) == size_a + size_b);
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
@@ -1235,13 +1242,13 @@ extent_split_impl(tsdn_t *tsdn, edata_cache_t *edata_cache, ehooks_t *ehooks,
 	    edata_zeroed_get(edata), edata_committed_get(edata),
 	    edata_ranged_get(edata), EXTENT_NOT_HEAD);
 	emap_prepare_t prepare;
-	bool err = emap_split_prepare(tsdn, &emap_global, &prepare, edata,
+	bool err = emap_split_prepare(tsdn, shard->emap, &prepare, edata,
 	    size_a, szind_a, slab_a, trail, size_b, szind_b, slab_b);
 	if (err) {
 		goto label_error_b;
 	}
 
-	emap_lock_edata2(tsdn, &emap_global, edata, trail);
+	emap_lock_edata2(tsdn, shard->emap, edata, trail);
 
 	err = ehooks_split(tsdn, ehooks, edata_base_get(edata), size_a + size_b,
 	    size_a, size_b, edata_committed_get(edata));
@@ -1252,14 +1259,14 @@ extent_split_impl(tsdn_t *tsdn, edata_cache_t *edata_cache, ehooks_t *ehooks,
 	edata_size_set(edata, size_a);
 	edata_szind_set(edata, szind_a);
 
-	emap_split_commit(tsdn, &emap_global, &prepare, edata, size_a, szind_a,
+	emap_split_commit(tsdn, shard->emap, &prepare, edata, size_a, szind_a,
 	    slab_a, trail, size_b, szind_b, slab_b);
 
-	emap_unlock_edata2(tsdn, &emap_global, edata, trail);
+	emap_unlock_edata2(tsdn, shard->emap, edata, trail);
 
 	return trail;
 label_error_c:
-	emap_unlock_edata2(tsdn, &emap_global, edata, trail);
+	emap_unlock_edata2(tsdn, shard->emap, edata, trail);
 label_error_b:
 	edata_cache_put(tsdn, edata_cache, trail);
 label_error_a:
@@ -1267,16 +1274,16 @@ label_error_a:
 }
 
 edata_t *
-extent_split_wrapper(tsdn_t *tsdn, edata_cache_t *edata_cache, ehooks_t *ehooks,
-    edata_t *edata, size_t size_a, szind_t szind_a, bool slab_a,
-    size_t size_b, szind_t szind_b, bool slab_b) {
-	return extent_split_impl(tsdn, edata_cache, ehooks, edata, size_a,
-	    szind_a, slab_a, size_b, szind_b, slab_b, false);
+extent_split_wrapper(tsdn_t *tsdn, pa_shard_t *shard,
+    edata_cache_t *edata_cache, ehooks_t *ehooks, edata_t *edata, size_t size_a,
+    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
+	return extent_split_impl(tsdn, shard, edata_cache, ehooks, edata,
+	    size_a, szind_a, slab_a, size_b, szind_b, slab_b, false);
 }
 
 static bool
-extent_merge_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_cache_t *edata_cache,
-    edata_t *a, edata_t *b, bool growing_retained) {
+extent_merge_impl(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
+    edata_cache_t *edata_cache, edata_t *a, edata_t *b, bool growing_retained) {
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
 	assert(edata_base_get(a) < edata_base_get(b));
@@ -1298,9 +1305,9 @@ extent_merge_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_cache_t *edata_cache,
 	 * than extent_{,de}register() to do things in the right order.
 	 */
 	emap_prepare_t prepare;
-	emap_merge_prepare(tsdn, &emap_global, &prepare, a, b);
+	emap_merge_prepare(tsdn, shard->emap, &prepare, a, b);
 
-	emap_lock_edata2(tsdn, &emap_global, a, b);
+	emap_lock_edata2(tsdn, shard->emap, a, b);
 
 	edata_size_set(a, edata_size_get(a) + edata_size_get(b));
 	edata_szind_set(a, SC_NSIZES);
@@ -1308,8 +1315,8 @@ extent_merge_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_cache_t *edata_cache,
 	    edata_sn_get(a) : edata_sn_get(b));
 	edata_zeroed_set(a, edata_zeroed_get(a) && edata_zeroed_get(b));
 
-	emap_merge_commit(tsdn, &emap_global, &prepare, a, b);
-	emap_unlock_edata2(tsdn, &emap_global, a, b);
+	emap_merge_commit(tsdn, shard->emap, &prepare, a, b);
+	emap_unlock_edata2(tsdn, shard->emap, a, b);
 
 	edata_cache_put(tsdn, edata_cache, b);
@@ -1317,9 +1324,9 @@ extent_merge_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_cache_t *edata_cache,
 }
 
 bool
-extent_merge_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_cache_t *edata_cache,
-    edata_t *a, edata_t *b) {
-	return extent_merge_impl(tsdn, ehooks, edata_cache, a, b, false);
+extent_merge_wrapper(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
+    edata_cache_t *edata_cache, edata_t *a, edata_t *b) {
+	return extent_merge_impl(tsdn, shard, ehooks, edata_cache, a, b, false);
 }
 
 bool


@@ -6,7 +6,7 @@ inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr, size_t *nfree,
     size_t *nregs, size_t *size) {
 	assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);
 
-	const edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
+	const edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
 	if (unlikely(edata == NULL)) {
 		*nfree = *nregs = *size = 0;
 		return;
@@ -31,7 +31,7 @@ inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
 	assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
 	    && bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);
 
-	const edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
+	const edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
 	if (unlikely(edata == NULL)) {
 		*nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
 		*slabcur_addr = NULL;


@@ -1623,7 +1623,7 @@ malloc_init_hard_a0_locked() {
 		return true;
 	}
 	/* emap_global is static, hence zeroed. */
-	if (emap_init(&emap_global, b0get(), /* zeroed */ true)) {
+	if (emap_init(&arena_emap_global, b0get(), /* zeroed */ true)) {
 		return true;
 	}
 	if (extent_boot()) {
@@ -2645,7 +2645,8 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
 	assert(malloc_initialized() || IS_INITIALIZER);
 
 	emap_alloc_ctx_t alloc_ctx;
-	emap_alloc_ctx_lookup(tsd_tsdn(tsd), &emap_global, ptr, &alloc_ctx);
+	emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
+	    &alloc_ctx);
 	assert(alloc_ctx.szind != SC_NSIZES);
 
 	size_t usize = sz_index2size(alloc_ctx.szind);
@@ -2699,12 +2700,12 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
 		if (config_debug) {
 			emap_alloc_ctx_t dbg_ctx;
 			emap_alloc_ctx_lookup(tsd_tsdn(tsd),
-			    &emap_global, ptr, &dbg_ctx);
+			    &arena_emap_global, ptr, &dbg_ctx);
 			assert(dbg_ctx.szind == alloc_ctx.szind);
 			assert(dbg_ctx.slab == alloc_ctx.slab);
 		}
 	} else if (opt_prof) {
-		emap_alloc_ctx_lookup(tsd_tsdn(tsd), &emap_global,
+		emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global,
 		    ptr, &alloc_ctx);
 
 		if (config_opt_safety_checks) {
@@ -2781,8 +2782,8 @@ bool free_fastpath(void *ptr, size_t size, bool size_hint) {
 		if (unlikely(tsd == NULL || !tsd_fast(tsd))) {
 			return false;
 		}
-		bool res = emap_alloc_ctx_try_lookup_fast(tsd, &emap_global,
-		    ptr, &alloc_ctx);
+		bool res = emap_alloc_ctx_try_lookup_fast(tsd,
+		    &arena_emap_global, ptr, &alloc_ctx);
 
 		/* Note: profiled objects will have alloc_ctx.slab set */
 		if (unlikely(!res || !alloc_ctx.slab)) {
@@ -3238,7 +3239,8 @@ do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) {
 	}
 
 	emap_alloc_ctx_t alloc_ctx;
-	emap_alloc_ctx_lookup(tsd_tsdn(tsd), &emap_global, ptr, &alloc_ctx);
+	emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
+	    &alloc_ctx);
 	assert(alloc_ctx.szind != SC_NSIZES);
 	old_usize = sz_index2size(alloc_ctx.szind);
 	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
@@ -3510,11 +3512,12 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
 	 * object associated with the ptr (though the content of the edata_t
 	 * object can be changed).
 	 */
-	edata_t *old_edata = emap_edata_lookup(tsd_tsdn(tsd), &emap_global,
-	    ptr);
+	edata_t *old_edata = emap_edata_lookup(tsd_tsdn(tsd),
+	    &arena_emap_global, ptr);
 
 	emap_alloc_ctx_t alloc_ctx;
-	emap_alloc_ctx_lookup(tsd_tsdn(tsd), &emap_global, ptr, &alloc_ctx);
+	emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
+	    &alloc_ctx);
 	assert(alloc_ctx.szind != SC_NSIZES);
 	old_usize = sz_index2size(alloc_ctx.szind);
 	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
@@ -3547,7 +3550,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
 	 * xallocx() should keep using the same edata_t object (though its
 	 * content can be changed).
 	 */
-	assert(emap_edata_lookup(tsd_tsdn(tsd), &emap_global, ptr)
+	assert(emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr)
 	    == old_edata);
 
 	if (unlikely(usize == old_usize)) {


@@ -202,7 +202,7 @@ void *
 large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
     size_t alignment, bool zero, tcache_t *tcache,
     hook_ralloc_args_t *hook_args) {
-	edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
+	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
 
 	size_t oldusize = edata_usize_get(edata);
 	/* The following should have been caught by callers. */


@@ -13,9 +13,9 @@ pa_nactive_sub(pa_shard_t *shard, size_t sub_pages) {
 }
 
 bool
-pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
-    pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx, nstime_t *cur_time,
-    ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
+pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
+    unsigned ind, pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx,
+    nstime_t *cur_time, ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
 	/* This will change eventually, but for now it should hold. */
 	assert(base_ind_get(base) == ind);
 	/*
@@ -68,6 +68,7 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
 	shard->stats = stats;
 	memset(shard->stats, 0, sizeof(*shard->stats));
 
+	shard->emap = emap;
 	shard->base = base;
 
 	return false;
@@ -175,8 +176,8 @@ pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
 	if (trail == NULL) {
 		return true;
 	}
-	if (extent_merge_wrapper(tsdn, ehooks, &shard->edata_cache, edata,
-	    trail)) {
+	if (extent_merge_wrapper(tsdn, shard, ehooks, &shard->edata_cache,
+	    edata, trail)) {
 		extent_dalloc_wrapper(tsdn, shard, ehooks, trail);
 		return true;
 	}
@@ -186,7 +187,7 @@ pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
 	}
 	pa_nactive_add(shard, expand_amount >> LG_PAGE);
 	edata_szind_set(edata, szind);
-	emap_remap(tsdn, &emap_global, edata, szind, slab);
+	emap_remap(tsdn, shard->emap, edata, szind, slab);
 
 	return false;
 }
@@ -205,8 +206,8 @@ pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
 		return true;
 	}
 
-	edata_t *trail = extent_split_wrapper(tsdn, &shard->edata_cache, ehooks,
-	    edata, new_size, szind, slab, shrink_amount, SC_NSIZES,
+	edata_t *trail = extent_split_wrapper(tsdn, shard, &shard->edata_cache,
+	    ehooks, edata, new_size, szind, slab, shrink_amount, SC_NSIZES,
 	    false);
 	if (trail == NULL) {
 		return true;


@@ -229,7 +229,8 @@ prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size,
 		prof_fetch_sys_thread_name(tsd);
 	}
 
-	edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &emap_global, ptr);
+	edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
+	    ptr);
 	prof_info_set(tsd, edata, tctx);
 
 	malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);


@@ -125,7 +125,7 @@ tbin_edatas_lookup_size_check(tsd_t *tsd, cache_bin_ptr_array_t *arr,
 	size_t szind_sum = binind * nflush;
 	for (unsigned i = 0; i < nflush; i++) {
 		emap_full_alloc_ctx_t full_alloc_ctx;
-		emap_full_alloc_ctx_lookup(tsd_tsdn(tsd), &emap_global,
+		emap_full_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global,
 		    cache_bin_ptr_array_get(arr, i), &full_alloc_ctx);
 		edatas[i] = full_alloc_ctx.edata;
 		szind_sum -= full_alloc_ctx.szind;
@@ -185,7 +185,8 @@ tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
 	} else {
 		for (unsigned i = 0 ; i < nflush; i++) {
 			item_edata[i] = emap_edata_lookup(tsd_tsdn(tsd),
-			    &emap_global, cache_bin_ptr_array_get(&ptrs, i));
+			    &arena_emap_global,
+			    cache_bin_ptr_array_get(&ptrs, i));
 		}
 	}


@@ -61,8 +61,8 @@ get_large_size(size_t ind) {
 
 static size_t
 vsalloc(tsdn_t *tsdn, const void *ptr) {
 	emap_full_alloc_ctx_t full_alloc_ctx;
-	bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &emap_global, ptr,
-	    &full_alloc_ctx);
+	bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &arena_emap_global,
+	    ptr, &full_alloc_ctx);
 	if (missing) {
 		return 0;
 	}


@@ -62,12 +62,12 @@ thd_start(void *varg) {
 	ptr = mallocx(1, MALLOCX_TCACHE_NONE);
 	ptr2 = mallocx(129, MALLOCX_TCACHE_NONE);
 
-	edata = emap_edata_lookup(tsdn, &emap_global, ptr);
+	edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
 	shard1 = edata_binshard_get(edata);
 	dallocx(ptr, 0);
 	expect_u_lt(shard1, 16, "Unexpected bin shard used");
 
-	edata = emap_edata_lookup(tsdn, &emap_global, ptr2);
+	edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr2);
 	shard2 = edata_binshard_get(edata);
 	dallocx(ptr2, 0);
 	expect_u_lt(shard2, 4, "Unexpected bin shard used");


@@ -103,7 +103,7 @@ TEST_END
 static void
 confirm_malloc(void *p) {
 	assert_ptr_not_null(p, "malloc failed unexpectedly");
-	edata_t *e = emap_edata_lookup(TSDN_NULL, &emap_global, p);
+	edata_t *e = emap_edata_lookup(TSDN_NULL, &arena_emap_global, p);
 	assert_ptr_not_null(e, "NULL edata for living pointer");
 	prof_recent_t *n = edata_prof_recent_alloc_get_no_lock(e);
 	assert_ptr_not_null(n, "Record in edata should not be NULL");