PA: Move in the arena edata_cache.

David Goldblatt 2020-03-08 11:41:19 -07:00 committed by David Goldblatt
parent 8433ad84ea
commit 688fb3eb89
8 changed files with 48 additions and 45 deletions
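
In short: the edata_t cache moves from the arena into the page allocator (PA) shard. arena_s loses its edata_cache field, pa_shard_s gains one, pa_shard_init() takes the base_t it needs to initialize the cache, and call sites now reach the cache through arena->pa_shard.edata_cache. A condensed sketch of the resulting layout, assembled from the hunks below (field order and omitted members are illustrative, not the full definitions):

    /* Before: the arena owned the source of edata_t objects. */
    struct arena_s {
        /* ... */
        edata_cache_t edata_cache;
        /* ... */
    };

    /* After: the PA shard owns it, initialized from the arena's base. */
    struct pa_shard_s {
        ecache_t ecache_dirty;
        ecache_t ecache_muzzy;
        ecache_t ecache_retained;
        /* The source of edata_t objects. */
        edata_cache_t edata_cache;
    };

    /* pa_shard_init() now needs the base to set up the cache. */
    bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base,
        unsigned ind);

    /* Consumers go through the shard, e.g. when allocating an extent: */
    edata_t *edata = edata_cache_get(tsdn, &arena->pa_shard.edata_cache);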

View File

@@ -166,9 +166,6 @@ struct arena_s {
/* The grow info for the retained ecache. */
ecache_grow_t ecache_grow;
/* The source of edata_t objects. */
edata_cache_t edata_cache;
/*
* bins is used to store heaps of free regions.
*

View File

@@ -17,9 +17,12 @@ struct pa_shard_s {
ecache_t ecache_dirty;
ecache_t ecache_muzzy;
ecache_t ecache_retained;
/* The source of edata_t objects. */
edata_cache_t edata_cache;
};
/* Returns true on error. */
bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, unsigned ind);
bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind);
#endif /* JEMALLOC_INTERNAL_PA_H */

View File

@@ -101,7 +101,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
ecache_npages_get(&arena->pa_shard.ecache_retained) << LG_PAGE);
atomic_store_zu(&astats->edata_avail,
atomic_load_zu(&arena->edata_cache.count, ATOMIC_RELAXED),
atomic_load_zu(&arena->pa_shard.edata_cache.count, ATOMIC_RELAXED),
ATOMIC_RELAXED);
arena_stats_accum_u64(&astats->decay_dirty.npurge,
@@ -228,7 +228,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
/* Gather per arena mutex profiling data. */
READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
READ_ARENA_MUTEX_PROF_DATA(edata_cache.mtx,
READ_ARENA_MUTEX_PROF_DATA(pa_shard.edata_cache.mtx,
arena_prof_mutex_extent_avail)
READ_ARENA_MUTEX_PROF_DATA(pa_shard.ecache_dirty.mtx,
arena_prof_mutex_extents_dirty)
@@ -2027,7 +2027,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
goto label_error;
}
if (pa_shard_init(tsdn, &arena->pa_shard, ind)) {
if (pa_shard_init(tsdn, &arena->pa_shard, base, ind)) {
goto label_error;
}
@@ -2044,10 +2044,6 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
goto label_error;
}
if (edata_cache_init(&arena->edata_cache, base)) {
goto label_error;
}
/* Initialize bins. */
uintptr_t bin_addr = (uintptr_t)arena + sizeof(arena_t);
atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE);
@@ -2190,7 +2186,7 @@ arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
void
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
edata_cache_prefork(tsdn, &arena->edata_cache);
edata_cache_prefork(tsdn, &arena->pa_shard.edata_cache);
}
void
@@ -2224,7 +2220,7 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
}
malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
base_postfork_parent(tsdn, arena->base);
edata_cache_postfork_parent(tsdn, &arena->edata_cache);
edata_cache_postfork_parent(tsdn, &arena->pa_shard.edata_cache);
ecache_postfork_parent(tsdn, &arena->pa_shard.ecache_dirty);
ecache_postfork_parent(tsdn, &arena->pa_shard.ecache_muzzy);
ecache_postfork_parent(tsdn, &arena->pa_shard.ecache_retained);
@@ -2270,7 +2266,7 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
}
malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
base_postfork_child(tsdn, arena->base);
edata_cache_postfork_child(tsdn, &arena->edata_cache);
edata_cache_postfork_child(tsdn, &arena->pa_shard.edata_cache);
ecache_postfork_child(tsdn, &arena->pa_shard.ecache_dirty);
ecache_postfork_child(tsdn, &arena->pa_shard.ecache_muzzy);
ecache_postfork_child(tsdn, &arena->pa_shard.ecache_retained);

View File

@@ -3072,7 +3072,7 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
continue;
}
MUTEX_PROF_RESET(arena->large_mtx);
MUTEX_PROF_RESET(arena->edata_cache.mtx);
MUTEX_PROF_RESET(arena->pa_shard.edata_cache.mtx);
MUTEX_PROF_RESET(arena->pa_shard.ecache_dirty.mtx);
MUTEX_PROF_RESET(arena->pa_shard.ecache_muzzy.mtx);
MUTEX_PROF_RESET(arena->pa_shard.ecache_retained.mtx);

View File

@@ -157,8 +157,8 @@ ecache_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
break;
}
/* Try to coalesce. */
if (extent_try_delayed_coalesce(tsdn, &arena->edata_cache,
ehooks, ecache, edata)) {
if (extent_try_delayed_coalesce(tsdn,
&arena->pa_shard.edata_cache, ehooks, ecache, edata)) {
break;
}
/*
@@ -212,7 +212,7 @@ extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
edata_size_get(edata), growing_retained);
}
}
edata_cache_put(tsdn, &arena->edata_cache, edata);
edata_cache_put(tsdn, &arena->pa_shard.edata_cache, edata);
}
static void
@@ -462,9 +462,9 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
/* Split the lead. */
if (leadsize != 0) {
*lead = *edata;
*edata = extent_split_impl(tsdn, &arena->edata_cache, ehooks,
*lead, leadsize, SC_NSIZES, false, size + trailsize, szind,
slab, growing_retained);
*edata = extent_split_impl(tsdn, &arena->pa_shard.edata_cache,
ehooks, *lead, leadsize, SC_NSIZES, false, size + trailsize,
szind, slab, growing_retained);
if (*edata == NULL) {
*to_leak = *lead;
*lead = NULL;
@@ -474,9 +474,9 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
/* Split the trail. */
if (trailsize != 0) {
*trail = extent_split_impl(tsdn, &arena->edata_cache, ehooks,
*edata, size, szind, slab, trailsize, SC_NSIZES, false,
growing_retained);
*trail = extent_split_impl(tsdn, &arena->pa_shard.edata_cache,
ehooks, *edata, size, szind, slab, trailsize, SC_NSIZES,
false, growing_retained);
if (*trail == NULL) {
*to_leak = *edata;
*to_salvage = *lead;
@@ -643,7 +643,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
alloc_size = sz_pind2sz(arena->ecache_grow.next + egn_skip);
}
edata_t *edata = edata_cache_get(tsdn, &arena->edata_cache);
edata_t *edata = edata_cache_get(tsdn, &arena->pa_shard.edata_cache);
if (edata == NULL) {
goto label_err;
}
@@ -654,7 +654,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
&committed);
if (ptr == NULL) {
edata_cache_put(tsdn, &arena->edata_cache, edata);
edata_cache_put(tsdn, &arena->pa_shard.edata_cache, edata);
goto label_err;
}
@@ -663,7 +663,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
committed, true, EXTENT_IS_HEAD);
if (extent_register_no_gdump_add(tsdn, edata)) {
edata_cache_put(tsdn, &arena->edata_cache, edata);
edata_cache_put(tsdn, &arena->pa_shard.edata_cache, edata);
goto label_err;
}
@@ -800,7 +800,7 @@ extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
edata_t *edata = edata_cache_get(tsdn, &arena->edata_cache);
edata_t *edata = edata_cache_get(tsdn, &arena->pa_shard.edata_cache);
if (edata == NULL) {
return NULL;
}
@@ -808,14 +808,14 @@ extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
void *addr = ehooks_alloc(tsdn, ehooks, new_addr, size, palignment,
zero, commit);
if (addr == NULL) {
edata_cache_put(tsdn, &arena->edata_cache, edata);
edata_cache_put(tsdn, &arena->pa_shard.edata_cache, edata);
return NULL;
}
edata_init(edata, arena_ind_get(arena), addr, size, slab, szind,
arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
true, EXTENT_NOT_HEAD);
if (extent_register(tsdn, edata)) {
edata_cache_put(tsdn, &arena->edata_cache, edata);
edata_cache_put(tsdn, &arena->pa_shard.edata_cache, edata);
return NULL;
}
@@ -971,8 +971,8 @@ extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
emap_assert_mapped(tsdn, &emap_global, edata);
if (!ecache->delay_coalesce) {
edata = extent_try_coalesce(tsdn, &arena->edata_cache, ehooks,
ecache, edata, NULL, growing_retained);
edata = extent_try_coalesce(tsdn, &arena->pa_shard.edata_cache,
ehooks, ecache, edata, NULL, growing_retained);
} else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) {
assert(ecache == &arena->pa_shard.ecache_dirty);
/* Always coalesce large extents eagerly. */
@@ -980,7 +980,7 @@ extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
do {
assert(edata_state_get(edata) == extent_state_active);
edata = extent_try_coalesce_large(tsdn,
&arena->edata_cache, ehooks, ecache, edata,
&arena->pa_shard.edata_cache, ehooks, ecache, edata,
&coalesced, growing_retained);
} while (coalesced);
if (edata_size_get(edata) >= oversize_threshold &&
@@ -1004,7 +1004,7 @@ extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
WITNESS_RANK_CORE, 0);
if (extent_register(tsdn, edata)) {
edata_cache_put(tsdn, &arena->edata_cache, edata);
edata_cache_put(tsdn, &arena->pa_shard.edata_cache, edata);
return;
}
extent_dalloc_wrapper(tsdn, arena, ehooks, edata);
@@ -1027,7 +1027,7 @@ extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
edata_size_get(edata), edata_committed_get(edata));
if (!err) {
edata_cache_put(tsdn, &arena->edata_cache, edata);
edata_cache_put(tsdn, &arena->pa_shard.edata_cache, edata);
}
return err;
@@ -1097,7 +1097,7 @@ extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
ehooks_destroy(tsdn, ehooks, edata_base_get(edata),
edata_size_get(edata), edata_committed_get(edata));
edata_cache_put(tsdn, &arena->edata_cache, edata);
edata_cache_put(tsdn, &arena->pa_shard.edata_cache, edata);
}
static bool

View File

@@ -123,7 +123,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
return NULL;
}
gap = edata_cache_get(tsdn, &arena->edata_cache);
gap = edata_cache_get(tsdn, &arena->pa_shard.edata_cache);
if (gap == NULL) {
return NULL;
}
@@ -189,7 +189,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
extent_dalloc_gap(tsdn, arena, gap);
} else {
edata_cache_put(tsdn,
&arena->edata_cache, gap);
&arena->pa_shard.edata_cache, gap);
}
if (!*commit) {
*commit = pages_decommit(ret, size);
@@ -225,7 +225,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
}
label_oom:
extent_dss_extending_finish();
edata_cache_put(tsdn, &arena->edata_cache, gap);
edata_cache_put(tsdn, &arena->pa_shard.edata_cache, gap);
return NULL;
}

View File

@@ -81,9 +81,10 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
/* Split excess pages. */
if (diff != 0) {
edata_t *trail = extent_split_wrapper(tsdn, &arena->edata_cache,
ehooks, edata, usize + sz_large_pad, sz_size2index(usize),
false, diff, SC_NSIZES, false);
edata_t *trail = extent_split_wrapper(tsdn,
&arena->pa_shard.edata_cache, ehooks, edata,
usize + sz_large_pad, sz_size2index(usize), false, diff,
SC_NSIZES, false);
if (trail == NULL) {
return true;
}
@@ -140,8 +141,8 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
}
}
if (extent_merge_wrapper(tsdn, ehooks, &arena->edata_cache, edata,
trail)) {
if (extent_merge_wrapper(tsdn, ehooks, &arena->pa_shard.edata_cache,
edata, trail)) {
extent_dalloc_wrapper(tsdn, arena, ehooks, trail);
return true;
}

View File

@@ -2,7 +2,9 @@
#include "jemalloc/internal/jemalloc_internal_includes.h"
bool
pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, unsigned ind) {
pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind) {
/* This will change eventually, but for now it should hold. */
assert(base_ind_get(base) == ind);
/*
* Delay coalescing for dirty extents despite the disruptive effect on
* memory layout for best-fit extent allocation, since cached extents
@@ -31,5 +33,9 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, unsigned ind) {
ind, /* delay_coalesce */ false)) {
return true;
}
if (edata_cache_init(&shard->edata_cache, base)) {
return true;
}
return false;
}