PA: Parameterize emap. Move emap_global to arena.

This lets us test the PA module without interfering with the global emap used by
the real allocator (the one not under test).
David Goldblatt
2020-03-14 10:49:34 -07:00
committed by David Goldblatt
parent f730577277
commit 294b276fc7
19 changed files with 211 additions and 184 deletions
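Sketch (not part of the diff): after this change a test can give the PA module its own emap rather than sharing the allocator's global one. The emap_init() and pa_shard_init() signatures used below are the ones declared in the headers in this commit; the test-side helper name, variables, and argument values are illustrative assumptions.

/* Illustrative test-side setup; helper and variable names are not from this commit. */
static emap_t test_emap;	/* private to the test, not arena_emap_global */
static pa_shard_t test_shard;

static bool
test_pa_setup(tsdn_t *tsdn, base_t *base, nstime_t *cur_time,
    pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx) {
	/* Both init functions return true on error. */
	if (emap_init(&test_emap, base, /* zeroed */ true)) {
		return true;
	}
	/* The shard is now tied to test_emap instead of the global emap. */
	return pa_shard_init(tsdn, &test_shard, &test_emap, base, /* ind */ 0,
	    stats, stats_mtx, cur_time, /* dirty_decay_ms */ 0,
	    /* muzzy_decay_ms */ 0);
}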

@@ -15,6 +15,7 @@ extern const char *percpu_arena_mode_names[];
 extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];
 extern malloc_mutex_t arenas_lock;
+extern emap_t arena_emap_global;
 extern size_t opt_oversize_threshold;
 extern size_t oversize_threshold;

@@ -48,10 +48,12 @@ arena_prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
 	/* Static check. */
 	if (alloc_ctx == NULL) {
-		edata = emap_edata_lookup(tsd_tsdn(tsd), &emap_global, ptr);
+		edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
+		    ptr);
 		is_slab = edata_slab_get(edata);
 	} else if (unlikely(!(is_slab = alloc_ctx->slab))) {
-		edata = emap_edata_lookup(tsd_tsdn(tsd), &emap_global, ptr);
+		edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
+		    ptr);
 	}
 	if (unlikely(!is_slab)) {
@@ -75,15 +77,15 @@ arena_prof_tctx_reset(tsd_t *tsd, const void *ptr,
 	/* Static check. */
 	if (alloc_ctx == NULL) {
-		edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &emap_global,
-		    ptr);
+		edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
+		    &arena_emap_global, ptr);
 		if (unlikely(!edata_slab_get(edata))) {
 			large_prof_tctx_reset(edata);
 		}
 	} else {
 		if (unlikely(!alloc_ctx->slab)) {
 			edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
-			    &emap_global, ptr);
+			    &arena_emap_global, ptr);
 			large_prof_tctx_reset(edata);
 		}
 	}
@@ -94,7 +96,8 @@ arena_prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
 	cassert(config_prof);
 	assert(ptr != NULL);
-	edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &emap_global, ptr);
+	edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
+	    ptr);
 	assert(!edata_slab_get(edata));
 	large_prof_tctx_reset(edata);
@@ -157,7 +160,7 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
 JEMALLOC_ALWAYS_INLINE arena_t *
 arena_aalloc(tsdn_t *tsdn, const void *ptr) {
-	edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
+	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
 	unsigned arena_ind = edata_arena_ind_get(edata);
 	return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_RELAXED);
 }
@@ -166,7 +169,7 @@ JEMALLOC_ALWAYS_INLINE size_t
 arena_salloc(tsdn_t *tsdn, const void *ptr) {
 	assert(ptr != NULL);
 	emap_alloc_ctx_t alloc_ctx;
-	emap_alloc_ctx_lookup(tsdn, &emap_global, ptr, &alloc_ctx);
+	emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
 	assert(alloc_ctx.szind != SC_NSIZES);
 	return sz_index2size(alloc_ctx.szind);
@@ -184,8 +187,8 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
 	 */
 	emap_full_alloc_ctx_t full_alloc_ctx;
-	bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &emap_global, ptr,
-	    &full_alloc_ctx);
+	bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &arena_emap_global,
+	    ptr, &full_alloc_ctx);
 	if (missing) {
 		return 0;
 	}
@@ -208,7 +211,8 @@ arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
 	if (config_prof && unlikely(szind < SC_NBINS)) {
 		arena_dalloc_promoted(tsdn, ptr, NULL, true);
 	} else {
-		edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
+		edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
+		    ptr);
 		large_dalloc(tsdn, edata);
 	}
 }
@@ -218,10 +222,11 @@ arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
 	assert(ptr != NULL);
 	emap_alloc_ctx_t alloc_ctx;
-	emap_alloc_ctx_lookup(tsdn, &emap_global, ptr, &alloc_ctx);
+	emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
 	if (config_debug) {
-		edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
+		edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
+		    ptr);
 		assert(alloc_ctx.szind == edata_szind_get(edata));
 		assert(alloc_ctx.szind < SC_NSIZES);
 		assert(alloc_ctx.slab == edata_slab_get(edata));
@@ -246,7 +251,8 @@ arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
 			    slow_path);
 		}
 	} else {
-		edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
+		edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
+		    ptr);
 		large_dalloc(tsdn, edata);
 	}
 }
@@ -267,11 +273,13 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
 		alloc_ctx = *caller_alloc_ctx;
 	} else {
 		util_assume(!tsdn_null(tsdn));
-		emap_alloc_ctx_lookup(tsdn, &emap_global, ptr, &alloc_ctx);
+		emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
+		    &alloc_ctx);
 	}
 	if (config_debug) {
-		edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
+		edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
+		    ptr);
 		assert(alloc_ctx.szind == edata_szind_get(edata));
 		assert(alloc_ctx.szind < SC_NSIZES);
 		assert(alloc_ctx.slab == edata_slab_get(edata));
@@ -303,15 +311,16 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
 	}
 	if ((config_prof && opt_prof) || config_debug) {
-		emap_alloc_ctx_lookup(tsdn, &emap_global, ptr, &alloc_ctx);
+		emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
+		    &alloc_ctx);
 		assert(alloc_ctx.szind == sz_size2index(size));
 		assert((config_prof && opt_prof)
 		    || alloc_ctx.slab == (alloc_ctx.szind < SC_NBINS));
 		if (config_debug) {
-			edata_t *edata = emap_edata_lookup(tsdn, &emap_global,
-			    ptr);
+			edata_t *edata = emap_edata_lookup(tsdn,
+			    &arena_emap_global, ptr);
 			assert(alloc_ctx.szind == edata_szind_get(edata));
 			assert(alloc_ctx.slab == edata_slab_get(edata));
 		}
@@ -341,7 +350,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
 	if (config_prof && opt_prof) {
 		if (caller_alloc_ctx == NULL) {
 			/* Uncommon case and should be a static check. */
-			emap_alloc_ctx_lookup(tsdn, &emap_global, ptr,
+			emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
 			    &alloc_ctx);
 			assert(alloc_ctx.szind == sz_size2index(size));
 		} else {
@@ -357,7 +366,8 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
 	}
 	if (config_debug) {
-		edata_t *edata = emap_edata_lookup(tsdn, &emap_global, ptr);
+		edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
+		    ptr);
 		assert(alloc_ctx.szind == edata_szind_get(edata));
 		assert(alloc_ctx.slab == edata_slab_get(edata));
 	}

@@ -26,8 +26,6 @@ struct emap_full_alloc_ctx_s {
 	edata_t *edata;
 };
-extern emap_t emap_global;
 bool emap_init(emap_t *emap, base_t *base, bool zeroed);
 /*

@@ -47,10 +47,10 @@ bool extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
     size_t offset, size_t length);
 bool extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
     size_t offset, size_t length);
-edata_t *extent_split_wrapper(tsdn_t *tsdn, edata_cache_t *edata_cache,
-    ehooks_t *ehooks, edata_t *edata, size_t size_a, szind_t szind_a,
-    bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
-bool extent_merge_wrapper(tsdn_t *tsdn, ehooks_t *ehooks,
+edata_t *extent_split_wrapper(tsdn_t *tsdn, pa_shard_t *shard,
+    edata_cache_t *edata_cache, ehooks_t *ehooks, edata_t *edata, size_t size_a,
+    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
+bool extent_merge_wrapper(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
     edata_cache_t *edata_cache, edata_t *a, edata_t *b);
 bool extent_boot(void);
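Aside (not part of the hunk): the split/merge wrappers now take the owning pa_shard_t, presumably so they can reach that shard's emap (the field added to pa_shard_s below) instead of a process-global one. A call-site sketch, with illustrative local names:

/* Illustrative call site; the local variable names are assumptions. */
edata_t *trail = extent_split_wrapper(tsdn, shard, edata_cache, ehooks, edata,
    size_a, szind_a, slab_a, size_b, szind_b, slab_b);
if (trail == NULL) {
	/* Split failed. */
}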

@@ -5,6 +5,7 @@
#include "jemalloc/internal/decay.h"
#include "jemalloc/internal/ecache.h"
#include "jemalloc/internal/edata_cache.h"
#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/lockedint.h"
enum pa_decay_purge_setting_e {
@@ -140,6 +141,9 @@ struct pa_shard_s {
 	decay_t decay_dirty; /* dirty --> muzzy */
 	decay_t decay_muzzy; /* muzzy --> retained */
+	/* The emap this shard is tied to. */
+	emap_t *emap;
 	/* The base from which we get the ehooks and allocate metadat. */
 	base_t *base;
 };
@@ -171,9 +175,10 @@ pa_shard_ehooks_get(pa_shard_t *shard) {
 }
 /* Returns true on error. */
-bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
-    pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx, nstime_t *cur_time,
-    ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms);
+bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
+    unsigned ind, pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx,
+    nstime_t *cur_time, ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms);
 /*
  * This does the PA-specific parts of arena reset (i.e. freeing all active
  * allocations).
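Aside (not part of the hunks shown): with the extra emap_t * parameter, the real allocator is expected to wire each shard to arena_emap_global, while tests can pass a private emap as sketched above. The arena-side call would look roughly like the sketch below; the variable and field names are approximations, not text from this commit.

/* Rough arena-side call; variable and field names are illustrative. */
if (pa_shard_init(tsdn, &arena->pa_shard, &arena_emap_global, base, ind,
    shard_stats, stats_mtx, &cur_time, dirty_decay_ms, muzzy_decay_ms)) {
	goto label_error;
}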