PAC: Move in ecache_grow.

Author: David Goldblatt (committed by David Goldblatt)
Date:   2020-06-01 16:01:53 -07:00
Parent: 65803171a7
Commit: c81e389996
7 changed files with 53 additions and 42 deletions
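The retained-extent growth state moves from pa_shard_t into pac_t: the ecache_grow field, its initialization, and its prefork/postfork handling now live at the pac level, the extent code reaches it as shard->pac.ecache_grow, and pa_shard_retain_grow_limit_get_set() becomes a thin wrapper around the new pac_retain_grow_limit_get_set().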

View File

@@ -126,8 +126,6 @@ struct pa_shard_s {
         /* The source of edata_t objects. */
         edata_cache_t edata_cache;
-        /* The grow info for the retained ecache. */
-        ecache_grow_t ecache_grow;
         /* Extent serial number generator state. */
         atomic_zu_t extent_sn_next;

View File

@@ -23,9 +23,14 @@ struct pac_s {
         emap_t *emap;
         edata_cache_t *edata_cache;
+        /* The grow info for the retained ecache. */
+        ecache_grow_t ecache_grow;
 };
 bool pac_init(tsdn_t *tsdn, pac_t *pac, unsigned ind, emap_t *emap,
     edata_cache_t *edata_cache);
+bool pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
+    size_t *new_limit);
 #endif /* JEMALLOC_INTERNAL_PAC_H */

View File

@@ -608,7 +608,7 @@ extent_recycle(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
 static edata_t *
 extent_grow_retained(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
     size_t size, size_t alignment, bool zero, bool *commit) {
-        malloc_mutex_assert_owner(tsdn, &shard->ecache_grow.mtx);
+        malloc_mutex_assert_owner(tsdn, &shard->pac.ecache_grow.mtx);
         size_t alloc_size_min = size + PAGE_CEILING(alignment) - PAGE;
         /* Beware size_t wrap-around. */
@@ -620,16 +620,16 @@ extent_grow_retained(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
          * satisfy this request.
          */
         pszind_t egn_skip = 0;
-        size_t alloc_size = sz_pind2sz(shard->ecache_grow.next + egn_skip);
+        size_t alloc_size = sz_pind2sz(shard->pac.ecache_grow.next + egn_skip);
         while (alloc_size < alloc_size_min) {
                 egn_skip++;
-                if (shard->ecache_grow.next + egn_skip >=
+                if (shard->pac.ecache_grow.next + egn_skip >=
                     sz_psz2ind(SC_LARGE_MAXCLASS)) {
                         /* Outside legal range. */
                         goto label_err;
                 }
                 alloc_size = sz_pind2sz(
-                    shard->ecache_grow.next + egn_skip);
+                    shard->pac.ecache_grow.next + egn_skip);
         }
         edata_t *edata = edata_cache_get(tsdn, shard->pac.edata_cache);
@@ -722,14 +722,14 @@ extent_grow_retained(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
          * Increment extent_grow_next if doing so wouldn't exceed the allowed
          * range.
          */
-        if (shard->ecache_grow.next + egn_skip + 1 <=
-            shard->ecache_grow.limit) {
-                shard->ecache_grow.next += egn_skip + 1;
+        if (shard->pac.ecache_grow.next + egn_skip + 1 <=
+            shard->pac.ecache_grow.limit) {
+                shard->pac.ecache_grow.next += egn_skip + 1;
         } else {
-                shard->ecache_grow.next = shard->ecache_grow.limit;
+                shard->pac.ecache_grow.next = shard->pac.ecache_grow.limit;
         }
         /* All opportunities for failure are past. */
-        malloc_mutex_unlock(tsdn, &shard->ecache_grow.mtx);
+        malloc_mutex_unlock(tsdn, &shard->pac.ecache_grow.mtx);
         if (config_prof) {
                 /* Adjust gdump stats now that extent is final size. */
@@ -743,7 +743,7 @@ extent_grow_retained(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
         return edata;
 label_err:
-        malloc_mutex_unlock(tsdn, &shard->ecache_grow.mtx);
+        malloc_mutex_unlock(tsdn, &shard->pac.ecache_grow.mtx);
         return NULL;
 }
@@ -753,13 +753,13 @@ extent_alloc_retained(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
         assert(size != 0);
         assert(alignment != 0);
-        malloc_mutex_lock(tsdn, &shard->ecache_grow.mtx);
+        malloc_mutex_lock(tsdn, &shard->pac.ecache_grow.mtx);
         edata_t *edata = extent_recycle(tsdn, shard, ehooks,
             &shard->pac.ecache_retained, new_addr, size, alignment, zero,
             commit, /* growing_retained */ true);
         if (edata != NULL) {
-                malloc_mutex_unlock(tsdn, &shard->ecache_grow.mtx);
+                malloc_mutex_unlock(tsdn, &shard->pac.ecache_grow.mtx);
                 if (config_prof) {
                         extent_gdump_add(tsdn, edata);
                 }
@@ -768,9 +768,9 @@ extent_alloc_retained(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
                     alignment, zero, commit);
                 /* extent_grow_retained() always releases extent_grow_mtx. */
         } else {
-                malloc_mutex_unlock(tsdn, &shard->ecache_grow.mtx);
+                malloc_mutex_unlock(tsdn, &shard->pac.ecache_grow.mtx);
         }
-        malloc_mutex_assert_not_owner(tsdn, &shard->ecache_grow.mtx);
+        malloc_mutex_assert_not_owner(tsdn, &shard->pac.ecache_grow.mtx);
         return edata;
 }
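Note (not part of the diff): the hunks above only rename the field; the growth-mutex protocol itself is unchanged. A condensed, hypothetical sketch of that protocol follows. The helper name and the can_grow flag are illustrative stand-ins for surrounding code that falls outside the hunks shown.

static edata_t *
alloc_retained_sketch(tsdn_t *tsdn, pa_shard_t *shard, ehooks_t *ehooks,
    void *new_addr, size_t size, size_t alignment, bool zero, bool *commit,
    bool can_grow) {
        /* The grow mutex is held across the recycle attempt. */
        malloc_mutex_lock(tsdn, &shard->pac.ecache_grow.mtx);
        edata_t *edata = extent_recycle(tsdn, shard, ehooks,
            &shard->pac.ecache_retained, new_addr, size, alignment, zero,
            commit, /* growing_retained */ true);
        if (edata != NULL) {
                /* Reused a retained extent; drop the grow mutex. */
                malloc_mutex_unlock(tsdn, &shard->pac.ecache_grow.mtx);
        } else if (can_grow) {
                /* extent_grow_retained() releases the mutex on every path. */
                edata = extent_grow_retained(tsdn, shard, ehooks, size,
                    alignment, zero, commit);
        } else {
                malloc_mutex_unlock(tsdn, &shard->pac.ecache_grow.mtx);
        }
        malloc_mutex_assert_not_owner(tsdn, &shard->pac.ecache_grow.mtx);
        return edata;
}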

View File

@@ -32,9 +32,6 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
         if (pac_init(tsdn, &shard->pac, ind, emap, &shard->edata_cache)) {
                 return true;
         }
-        if (ecache_grow_init(tsdn, &shard->ecache_grow)) {
-                return true;
-        }
         if (decay_init(&shard->decay_dirty, cur_time, dirty_decay_ms)) {
                 return true;
@@ -455,23 +452,6 @@ pa_maybe_decay_purge(tsdn_t *tsdn, pa_shard_t *shard, decay_t *decay,
 bool
 pa_shard_retain_grow_limit_get_set(tsdn_t *tsdn, pa_shard_t *shard,
     size_t *old_limit, size_t *new_limit) {
-        pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
-        if (new_limit != NULL) {
-                size_t limit = *new_limit;
-                /* Grow no more than the new limit. */
-                if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) {
-                        return true;
-                }
-        }
-        malloc_mutex_lock(tsdn, &shard->ecache_grow.mtx);
-        if (old_limit != NULL) {
-                *old_limit = sz_pind2sz(shard->ecache_grow.limit);
-        }
-        if (new_limit != NULL) {
-                shard->ecache_grow.limit = new_ind;
-        }
-        malloc_mutex_unlock(tsdn, &shard->ecache_grow.mtx);
-        return false;
+        return pac_retain_grow_limit_get_set(tsdn, &shard->pac, old_limit,
+            new_limit);
 }

View File

@@ -16,7 +16,7 @@ pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard) {
 void
 pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard) {
-        ecache_grow_prefork(tsdn, &shard->ecache_grow);
+        ecache_grow_prefork(tsdn, &shard->pac.ecache_grow);
 }
 void
@@ -37,7 +37,7 @@ pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard) {
         ecache_postfork_parent(tsdn, &shard->pac.ecache_dirty);
         ecache_postfork_parent(tsdn, &shard->pac.ecache_muzzy);
         ecache_postfork_parent(tsdn, &shard->pac.ecache_retained);
-        ecache_grow_postfork_parent(tsdn, &shard->ecache_grow);
+        ecache_grow_postfork_parent(tsdn, &shard->pac.ecache_grow);
         malloc_mutex_postfork_parent(tsdn, &shard->decay_dirty.mtx);
         malloc_mutex_postfork_parent(tsdn, &shard->decay_muzzy.mtx);
 }
@@ -48,7 +48,7 @@ pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard) {
         ecache_postfork_child(tsdn, &shard->pac.ecache_dirty);
         ecache_postfork_child(tsdn, &shard->pac.ecache_muzzy);
         ecache_postfork_child(tsdn, &shard->pac.ecache_retained);
-        ecache_grow_postfork_child(tsdn, &shard->ecache_grow);
+        ecache_grow_postfork_child(tsdn, &shard->pac.ecache_grow);
         malloc_mutex_postfork_child(tsdn, &shard->decay_dirty.mtx);
         malloc_mutex_postfork_child(tsdn, &shard->decay_muzzy.mtx);
 }

View File

@@ -34,8 +34,36 @@ pac_init(tsdn_t *tsdn, pac_t *pac, unsigned ind, emap_t *emap,
             ind, /* delay_coalesce */ false)) {
                 return true;
         }
+        if (ecache_grow_init(tsdn, &pac->ecache_grow)) {
+                return true;
+        }
         pac->emap = emap;
         pac->edata_cache = edata_cache;
         return false;
 }
+bool
+pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
+    size_t *new_limit) {
+        pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
+        if (new_limit != NULL) {
+                size_t limit = *new_limit;
+                /* Grow no more than the new limit. */
+                if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) {
+                        return true;
+                }
+        }
+        malloc_mutex_lock(tsdn, &pac->ecache_grow.mtx);
+        if (old_limit != NULL) {
+                *old_limit = sz_pind2sz(pac->ecache_grow.limit);
+        }
+        if (new_limit != NULL) {
+                pac->ecache_grow.limit = new_ind;
+        }
+        malloc_mutex_unlock(tsdn, &pac->ecache_grow.mtx);
+        return false;
+}
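Note (not part of the diff): a minimal caller sketch for the new entry point. The wrapper name, the out-parameter, and the 64 MiB figure are illustrative assumptions; a valid tsdn and an initialized pac are taken as given.

static bool
retain_grow_limit_sketch(tsdn_t *tsdn, pac_t *pac, size_t *old_limit_out) {
        /*
         * Cap future retained growth at (for example) 64 MiB.  The stored
         * limit is rounded down to the largest page size class that does not
         * exceed the request; a true return means the request was out of
         * range and nothing was read or changed.
         */
        size_t new_limit = (size_t)64 << 20;
        return pac_retain_grow_limit_get_set(tsdn, pac, old_limit_out,
            &new_limit);
}

pa_shard_retain_grow_limit_get_set() above has exactly this shape, with &shard->pac as the pac argument.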

View File

@@ -142,7 +142,7 @@ TEST_BEGIN(test_retained) {
         size_t usable = 0;
         size_t fragmented = 0;
         for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind <
-            arena->pa_shard.ecache_grow.next; pind++) {
+            arena->pa_shard.pac.ecache_grow.next; pind++) {
                 size_t psz = sz_pind2sz(pind);
                 size_t psz_fragmented = psz % esz;
                 size_t psz_usable = psz - psz_fragmented;