PA: move in ecache_grow.

David Goldblatt 2020-03-08 13:47:02 -07:00 committed by David Goldblatt
parent 32cb7c2f0b
commit acd0bf6a26
6 changed files with 32 additions and 29 deletions

@@ -163,9 +163,6 @@ struct arena_s {
 	arena_decay_t decay_dirty; /* dirty --> muzzy */
 	arena_decay_t decay_muzzy; /* muzzy --> retained */
 
-	/* The grow info for the retained ecache. */
-	ecache_grow_t ecache_grow;
-
 	/*
 	 * bins is used to store heaps of free regions.
 	 *

@@ -30,6 +30,9 @@ struct pa_shard_s {
 	/* The source of edata_t objects. */
 	edata_cache_t edata_cache;
 
+	/* The grow info for the retained ecache. */
+	ecache_grow_t ecache_grow;
+
 	pa_shard_stats_t *stats;
 };
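
The fields this diff touches (mtx, next, limit) imply roughly the following shape for ecache_grow_t. This is a sketch inferred from the accessors used in the hunks below, not a verbatim copy of the header:

typedef struct ecache_grow_s ecache_grow_t;
struct ecache_grow_s {
	/* Serializes growth of the retained extents. */
	malloc_mutex_t mtx;
	/* Next page size class to try when growing (a geometric series). */
	pszind_t next;
	/* Upper bound on how far the series may grow. */
	pszind_t limit;
};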

@@ -1935,14 +1935,14 @@ arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
 		}
 	}
 
-	malloc_mutex_lock(tsd_tsdn(tsd), &arena->ecache_grow.mtx);
+	malloc_mutex_lock(tsd_tsdn(tsd), &arena->pa_shard.ecache_grow.mtx);
 	if (old_limit != NULL) {
-		*old_limit = sz_pind2sz(arena->ecache_grow.limit);
+		*old_limit = sz_pind2sz(arena->pa_shard.ecache_grow.limit);
 	}
 	if (new_limit != NULL) {
-		arena->ecache_grow.limit = new_ind;
+		arena->pa_shard.ecache_grow.limit = new_ind;
 	}
-	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->ecache_grow.mtx);
+	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->pa_shard.ecache_grow.mtx);
 
 	return false;
 }
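
arena_retain_grow_limit_get_set() is the backend for jemalloc's arena.<i>.retain_grow_limit mallctl, so the relocation is invisible to callers. A minimal usage sketch (arena index 0 chosen arbitrarily for illustration):

#include <jemalloc/jemalloc.h>

size_t old_limit, new_limit = (size_t)2 << 20; /* 2 MiB, arbitrary */
size_t len = sizeof(old_limit);
/* Read the old limit and install the new one in a single call. */
if (mallctl("arena.0.retain_grow_limit", &old_limit, &len,
    &new_limit, sizeof(new_limit)) != 0) {
	/* handle error */
}
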
@@ -2042,10 +2042,6 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 		goto label_error;
 	}
 
-	if (ecache_grow_init(tsdn, &arena->ecache_grow)) {
-		goto label_error;
-	}
-
 	/* Initialize bins. */
 	uintptr_t bin_addr = (uintptr_t)arena + sizeof(arena_t);
 	atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE);
@@ -2176,7 +2172,7 @@ arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
 
 void
 arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
-	ecache_grow_prefork(tsdn, &arena->ecache_grow);
+	ecache_grow_prefork(tsdn, &arena->pa_shard.ecache_grow);
 }
 
 void
@@ -2226,7 +2222,7 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
 	ecache_postfork_parent(tsdn, &arena->pa_shard.ecache_dirty);
 	ecache_postfork_parent(tsdn, &arena->pa_shard.ecache_muzzy);
 	ecache_postfork_parent(tsdn, &arena->pa_shard.ecache_retained);
-	ecache_grow_postfork_parent(tsdn, &arena->ecache_grow);
+	ecache_grow_postfork_parent(tsdn, &arena->pa_shard.ecache_grow);
 	malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
 	malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
 	if (config_stats) {
@@ -2272,7 +2268,7 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
 	ecache_postfork_child(tsdn, &arena->pa_shard.ecache_dirty);
 	ecache_postfork_child(tsdn, &arena->pa_shard.ecache_muzzy);
 	ecache_postfork_child(tsdn, &arena->pa_shard.ecache_retained);
-	ecache_grow_postfork_child(tsdn, &arena->ecache_grow);
+	ecache_grow_postfork_child(tsdn, &arena->pa_shard.ecache_grow);
 	malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
 	malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
 	if (config_stats) {

@@ -619,7 +619,7 @@ static edata_t *
 extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     size_t size, size_t alignment, bool slab, szind_t szind,
     bool *zero, bool *commit) {
-	malloc_mutex_assert_owner(tsdn, &arena->ecache_grow.mtx);
+	malloc_mutex_assert_owner(tsdn, &arena->pa_shard.ecache_grow.mtx);
 	assert(!*zero || !slab);
 
 	size_t alloc_size_min = size + PAGE_CEILING(alignment) - PAGE;
@@ -632,15 +632,17 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	 * satisfy this request.
 	 */
 	pszind_t egn_skip = 0;
-	size_t alloc_size = sz_pind2sz(arena->ecache_grow.next + egn_skip);
+	size_t alloc_size = sz_pind2sz(
+	    arena->pa_shard.ecache_grow.next + egn_skip);
 	while (alloc_size < alloc_size_min) {
 		egn_skip++;
-		if (arena->ecache_grow.next + egn_skip >=
+		if (arena->pa_shard.ecache_grow.next + egn_skip >=
 		    sz_psz2ind(SC_LARGE_MAXCLASS)) {
 			/* Outside legal range. */
 			goto label_err;
 		}
-		alloc_size = sz_pind2sz(arena->ecache_grow.next + egn_skip);
+		alloc_size = sz_pind2sz(
+		    arena->pa_shard.ecache_grow.next + egn_skip);
 	}
 
 	edata_t *edata = edata_cache_get(tsdn, &arena->pa_shard.edata_cache);
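
The loop above is the geometric grow policy: starting from ecache_grow.next, skip forward through the page size classes until one is large enough for the request. A toy standalone model of the same logic (toy_pind2sz() and TOY_MAX_PIND are hypothetical stand-ins; the real code walks jemalloc's size-class table via sz_pind2sz()/sz_psz2ind()):

#include <stdbool.h>
#include <stddef.h>

#define TOY_PAGE ((size_t)4096)
#define TOY_MAX_PIND 32 /* stand-in for sz_psz2ind(SC_LARGE_MAXCLASS) */

/* Toy stand-in for sz_pind2sz(): classes double from one page. */
static size_t toy_pind2sz(unsigned pind) {
	return TOY_PAGE << pind;
}

/*
 * Find how many classes past 'next' to jump (egn_skip) so that
 * alloc_size covers alloc_size_min; false if we run off the table.
 */
static bool toy_grow_pick(unsigned next, size_t alloc_size_min,
    size_t *alloc_size, unsigned *egn_skip) {
	*egn_skip = 0;
	*alloc_size = toy_pind2sz(next);
	while (*alloc_size < alloc_size_min) {
		(*egn_skip)++;
		if (next + *egn_skip >= TOY_MAX_PIND) {
			return false; /* outside legal range */
		}
		*alloc_size = toy_pind2sz(next + *egn_skip);
	}
	return true;
}
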
@@ -735,14 +737,15 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	 * Increment extent_grow_next if doing so wouldn't exceed the allowed
 	 * range.
 	 */
-	if (arena->ecache_grow.next + egn_skip + 1 <=
-	    arena->ecache_grow.limit) {
-		arena->ecache_grow.next += egn_skip + 1;
+	if (arena->pa_shard.ecache_grow.next + egn_skip + 1 <=
+	    arena->pa_shard.ecache_grow.limit) {
+		arena->pa_shard.ecache_grow.next += egn_skip + 1;
 	} else {
-		arena->ecache_grow.next = arena->ecache_grow.limit;
+		arena->pa_shard.ecache_grow.next
+		    = arena->pa_shard.ecache_grow.limit;
 	}
 	/* All opportunities for failure are past. */
-	malloc_mutex_unlock(tsdn, &arena->ecache_grow.mtx);
+	malloc_mutex_unlock(tsdn, &arena->pa_shard.ecache_grow.mtx);
 
 	if (config_prof) {
 		/* Adjust gdump stats now that extent is final size. */
@@ -760,7 +763,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 
 	return edata;
 label_err:
-	malloc_mutex_unlock(tsdn, &arena->ecache_grow.mtx);
+	malloc_mutex_unlock(tsdn, &arena->pa_shard.ecache_grow.mtx);
 	return NULL;
 }
@@ -771,13 +774,13 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 	assert(size != 0);
 	assert(alignment != 0);
 
-	malloc_mutex_lock(tsdn, &arena->ecache_grow.mtx);
+	malloc_mutex_lock(tsdn, &arena->pa_shard.ecache_grow.mtx);
 
 	edata_t *edata = extent_recycle(tsdn, arena, ehooks,
 	    &arena->pa_shard.ecache_retained, new_addr, size, alignment, slab,
 	    szind, zero, commit, true);
 	if (edata != NULL) {
-		malloc_mutex_unlock(tsdn, &arena->ecache_grow.mtx);
+		malloc_mutex_unlock(tsdn, &arena->pa_shard.ecache_grow.mtx);
 		if (config_prof) {
 			extent_gdump_add(tsdn, edata);
 		}
@@ -786,9 +789,9 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
 		    alignment, slab, szind, zero, commit);
 		/* extent_grow_retained() always releases extent_grow_mtx. */
 	} else {
-		malloc_mutex_unlock(tsdn, &arena->ecache_grow.mtx);
+		malloc_mutex_unlock(tsdn, &arena->pa_shard.ecache_grow.mtx);
 	}
-	malloc_mutex_assert_not_owner(tsdn, &arena->ecache_grow.mtx);
+	malloc_mutex_assert_not_owner(tsdn, &arena->pa_shard.ecache_grow.mtx);
 
 	return edata;
 }

@@ -38,6 +38,10 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
 		return true;
 	}
 
+	if (ecache_grow_init(tsdn, &shard->ecache_grow)) {
+		return true;
+	}
+
 	shard->stats = stats;
 	memset(shard->stats, 0, sizeof(*shard->stats));

@@ -142,7 +142,7 @@ TEST_BEGIN(test_retained) {
 	size_t usable = 0;
 	size_t fragmented = 0;
 	for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind <
-	    arena->ecache_grow.next; pind++) {
+	    arena->pa_shard.ecache_grow.next; pind++) {
 		size_t psz = sz_pind2sz(pind);
 		size_t psz_fragmented = psz % esz;
 		size_t psz_usable = psz - psz_fragmented;