Rename ecache_grow -> geom_grow.

We're about to start using it outside of the ecaches, in the HPA central
allocator.
David Goldblatt authored on 2020-08-07 18:03:40 -07:00, committed by David Goldblatt
parent b399463fba, commit 131b1b5338
10 changed files with 84 additions and 74 deletions

@@ -29,29 +29,3 @@ void
 ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache) {
 	malloc_mutex_postfork_child(tsdn, &ecache->mtx);
 }
-
-bool
-ecache_grow_init(tsdn_t *tsdn, ecache_grow_t *ecache_grow) {
-	ecache_grow->next = sz_psz2ind(HUGEPAGE);
-	ecache_grow->limit = sz_psz2ind(SC_LARGE_MAXCLASS);
-	if (malloc_mutex_init(&ecache_grow->mtx, "extent_grow",
-	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
-		return true;
-	}
-	return false;
-}
-
-void
-ecache_grow_prefork(tsdn_t *tsdn, ecache_grow_t *ecache_grow) {
-	malloc_mutex_prefork(tsdn, &ecache_grow->mtx);
-}
-
-void
-ecache_grow_postfork_parent(tsdn_t *tsdn, ecache_grow_t *ecache_grow) {
-	malloc_mutex_postfork_parent(tsdn, &ecache_grow->mtx);
-}
-
-void
-ecache_grow_postfork_child(tsdn_t *tsdn, ecache_grow_t *ecache_grow) {
-	malloc_mutex_postfork_child(tsdn, &ecache_grow->mtx);
-}

@@ -614,7 +614,7 @@ extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
 static edata_t *
 extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     size_t size, size_t alignment, bool zero, bool *commit) {
-	malloc_mutex_assert_owner(tsdn, &pac->ecache_grow.mtx);
+	malloc_mutex_assert_owner(tsdn, &pac->geom_grow.mtx);
 
 	size_t alloc_size_min = size + PAGE_CEILING(alignment) - PAGE;
 	/* Beware size_t wrap-around. */
@@ -626,15 +626,15 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 	 * satisfy this request.
 	 */
 	pszind_t egn_skip = 0;
-	size_t alloc_size = sz_pind2sz(pac->ecache_grow.next + egn_skip);
+	size_t alloc_size = sz_pind2sz(pac->geom_grow.next + egn_skip);
 	while (alloc_size < alloc_size_min) {
 		egn_skip++;
-		if (pac->ecache_grow.next + egn_skip >=
+		if (pac->geom_grow.next + egn_skip >=
 		    sz_psz2ind(SC_LARGE_MAXCLASS)) {
 			/* Outside legal range. */
 			goto label_err;
 		}
-		alloc_size = sz_pind2sz(pac->ecache_grow.next + egn_skip);
+		alloc_size = sz_pind2sz(pac->geom_grow.next + egn_skip);
 	}
 
 	edata_t *edata = edata_cache_get(tsdn, pac->edata_cache);
@@ -727,13 +727,13 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 	 * Increment extent_grow_next if doing so wouldn't exceed the allowed
 	 * range.
 	 */
-	if (pac->ecache_grow.next + egn_skip + 1 <= pac->ecache_grow.limit) {
-		pac->ecache_grow.next += egn_skip + 1;
+	if (pac->geom_grow.next + egn_skip + 1 <= pac->geom_grow.limit) {
+		pac->geom_grow.next += egn_skip + 1;
 	} else {
-		pac->ecache_grow.next = pac->ecache_grow.limit;
+		pac->geom_grow.next = pac->geom_grow.limit;
 	}
 	/* All opportunities for failure are past. */
-	malloc_mutex_unlock(tsdn, &pac->ecache_grow.mtx);
+	malloc_mutex_unlock(tsdn, &pac->geom_grow.mtx);
 
 	if (config_prof) {
 		/* Adjust gdump stats now that extent is final size. */
@@ -747,7 +747,7 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 
 	return edata;
 label_err:
-	malloc_mutex_unlock(tsdn, &pac->ecache_grow.mtx);
+	malloc_mutex_unlock(tsdn, &pac->geom_grow.mtx);
 	return NULL;
 }
@@ -757,13 +757,13 @@ extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 	assert(size != 0);
 	assert(alignment != 0);
 
-	malloc_mutex_lock(tsdn, &pac->ecache_grow.mtx);
+	malloc_mutex_lock(tsdn, &pac->geom_grow.mtx);
 
 	edata_t *edata = extent_recycle(tsdn, pac, ehooks,
 	    &pac->ecache_retained, new_addr, size, alignment, zero,
 	    commit, /* growing_retained */ true);
 	if (edata != NULL) {
-		malloc_mutex_unlock(tsdn, &pac->ecache_grow.mtx);
+		malloc_mutex_unlock(tsdn, &pac->geom_grow.mtx);
 		if (config_prof) {
 			extent_gdump_add(tsdn, edata);
 		}
@@ -772,9 +772,9 @@ extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 		    alignment, zero, commit);
 		/* extent_grow_retained() always releases extent_grow_mtx. */
 	} else {
-		malloc_mutex_unlock(tsdn, &pac->ecache_grow.mtx);
+		malloc_mutex_unlock(tsdn, &pac->geom_grow.mtx);
 	}
-	malloc_mutex_assert_not_owner(tsdn, &pac->ecache_grow.mtx);
+	malloc_mutex_assert_not_owner(tsdn, &pac->geom_grow.mtx);
 
 	return edata;
 }
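
An aside on the logic touched above: extent_grow_retained() picks the smallest size class at or beyond geom_grow.next that satisfies the request, then advances next past the class it used, so successive grows of the retained region get geometrically larger, with next clamped to geom_grow.limit. The following standalone sketch models that skip-until-fit loop; toy_classes, toy_grow_pick, and the concrete sizes are illustrative stand-ins for sz_pind2sz()/pszind_t, not jemalloc internals.

/*
 * Illustrative sketch only (not jemalloc code): the skip-until-fit logic
 * from extent_grow_retained(), over a toy size-class table.
 */
#include <stddef.h>
#include <stdio.h>

/* Toy page-size classes, geometrically spaced like jemalloc's psz classes. */
static const size_t toy_classes[] = {
	2 << 20, 3 << 20, 4 << 20, 6 << 20, 8 << 20, 12 << 20, 16 << 20
};
#define TOY_NCLASSES (sizeof(toy_classes) / sizeof(toy_classes[0]))

/*
 * Pick the allocation size for a grow of at least 'min', starting from the
 * class index '*next'; advance *next past the class actually used so that
 * successive grows get geometrically larger, clamped to 'limit'.  Returns 0
 * if no class is large enough (the label_err path above).
 */
static size_t
toy_grow_pick(size_t *next, size_t limit, size_t min) {
	size_t skip = 0;
	size_t alloc_size = toy_classes[*next + skip];
	while (alloc_size < min) {
		skip++;
		if (*next + skip >= TOY_NCLASSES) {
			return 0; /* Outside legal range. */
		}
		alloc_size = toy_classes[*next + skip];
	}
	if (*next + skip + 1 <= limit) {
		*next += skip + 1;
	} else {
		*next = limit;
	}
	return alloc_size;
}

int
main(void) {
	size_t next = 0, limit = TOY_NCLASSES - 1;
	/* Three successive grows; the returned sizes ratchet upward. */
	printf("%zu\n", toy_grow_pick(&next, limit, 1 << 20)); /* 2 MiB */
	printf("%zu\n", toy_grow_pick(&next, limit, 1 << 20)); /* 3 MiB */
	printf("%zu\n", toy_grow_pick(&next, limit, 5 << 20)); /* 6 MiB */
	return 0;
}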

src/geom_grow.c (new file, 29 lines added)

@@ -0,0 +1,29 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+bool
+geom_grow_init(tsdn_t *tsdn, geom_grow_t *geom_grow) {
+	geom_grow->next = sz_psz2ind(HUGEPAGE);
+	geom_grow->limit = sz_psz2ind(SC_LARGE_MAXCLASS);
+	if (malloc_mutex_init(&geom_grow->mtx, "extent_grow",
+	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
+		return true;
+	}
+	return false;
+}
+
+void
+geom_grow_prefork(tsdn_t *tsdn, geom_grow_t *geom_grow) {
+	malloc_mutex_prefork(tsdn, &geom_grow->mtx);
+}
+
+void
+geom_grow_postfork_parent(tsdn_t *tsdn, geom_grow_t *geom_grow) {
+	malloc_mutex_postfork_parent(tsdn, &geom_grow->mtx);
+}
+
+void
+geom_grow_postfork_child(tsdn_t *tsdn, geom_grow_t *geom_grow) {
+	malloc_mutex_postfork_child(tsdn, &geom_grow->mtx);
+}
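
The matching header for the new module is not part of this excerpt. Judging from the fields geom_grow_init() touches, its declarations presumably look roughly like the sketch below; the struct tag and comments are guesses, only next, limit, and mtx are certain from the code above, and the fragment depends on jemalloc-internal types (pszind_t, malloc_mutex_t, tsdn_t), so it is not standalone.

/* Hypothetical sketch of the geom_grow header; not taken from this commit. */
typedef struct geom_grow_s geom_grow_t;
struct geom_grow_s {
	/* Next page-size-class index to grow from (starts at HUGEPAGE). */
	pszind_t next;
	/* Upper bound on growth, as a page-size-class index. */
	pszind_t limit;
	/* Serializes growth of the retained region. */
	malloc_mutex_t mtx;
};

bool geom_grow_init(tsdn_t *tsdn, geom_grow_t *geom_grow);
void geom_grow_prefork(tsdn_t *tsdn, geom_grow_t *geom_grow);
void geom_grow_postfork_parent(tsdn_t *tsdn, geom_grow_t *geom_grow);
void geom_grow_postfork_child(tsdn_t *tsdn, geom_grow_t *geom_grow);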

@@ -16,7 +16,7 @@ pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard) {
 
 void
 pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard) {
-	ecache_grow_prefork(tsdn, &shard->pac.ecache_grow);
+	geom_grow_prefork(tsdn, &shard->pac.geom_grow);
 }
 
 void
@@ -37,7 +37,7 @@ pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard) {
 	ecache_postfork_parent(tsdn, &shard->pac.ecache_dirty);
 	ecache_postfork_parent(tsdn, &shard->pac.ecache_muzzy);
 	ecache_postfork_parent(tsdn, &shard->pac.ecache_retained);
-	ecache_grow_postfork_parent(tsdn, &shard->pac.ecache_grow);
+	geom_grow_postfork_parent(tsdn, &shard->pac.geom_grow);
 	malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_dirty.mtx);
 	malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_muzzy.mtx);
 }
@@ -48,7 +48,7 @@ pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard) {
 	ecache_postfork_child(tsdn, &shard->pac.ecache_dirty);
 	ecache_postfork_child(tsdn, &shard->pac.ecache_muzzy);
 	ecache_postfork_child(tsdn, &shard->pac.ecache_retained);
-	ecache_grow_postfork_child(tsdn, &shard->pac.ecache_grow);
+	geom_grow_postfork_child(tsdn, &shard->pac.geom_grow);
 	malloc_mutex_postfork_child(tsdn, &shard->pac.decay_dirty.mtx);
 	malloc_mutex_postfork_child(tsdn, &shard->pac.decay_muzzy.mtx);
 }
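
The prefork/postfork renames above follow jemalloc's usual fork protocol: every allocator mutex is acquired before fork() and released (or reinitialized) in the parent and child afterward, so the child never inherits a lock held by a thread that no longer exists. Below is a minimal standalone illustration of that pattern with a plain pthread mutex; it is not jemalloc code, just the general shape of the hooks being renamed here.

#include <pthread.h>

/* Illustration of the prefork/postfork pattern; not jemalloc code. */
static pthread_mutex_t grow_mtx = PTHREAD_MUTEX_INITIALIZER;

static void
grow_prefork(void) {
	/* Hold the lock across fork() so no other thread owns it then. */
	pthread_mutex_lock(&grow_mtx);
}

static void
grow_postfork_parent(void) {
	pthread_mutex_unlock(&grow_mtx);
}

static void
grow_postfork_child(void) {
	/*
	 * The forking thread continues in the child and still owns the lock;
	 * releasing it (jemalloc may instead reinitialize its mutexes) makes
	 * it usable again in the now single-threaded child.
	 */
	pthread_mutex_unlock(&grow_mtx);
}

int
main(void) {
	pthread_atfork(grow_prefork, grow_postfork_parent, grow_postfork_child);
	/* Any later fork() in the process is now safe with respect to grow_mtx. */
	return 0;
}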

@@ -68,7 +68,7 @@ pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
 	    ind, /* delay_coalesce */ false)) {
 		return true;
 	}
-	if (ecache_grow_init(tsdn, &pac->ecache_grow)) {
+	if (geom_grow_init(tsdn, &pac->geom_grow)) {
 		return true;
 	}
 	if (decay_init(&pac->decay_dirty, cur_time, dirty_decay_ms)) {
@@ -203,14 +203,14 @@ pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
 		}
 	}
 
-	malloc_mutex_lock(tsdn, &pac->ecache_grow.mtx);
+	malloc_mutex_lock(tsdn, &pac->geom_grow.mtx);
 	if (old_limit != NULL) {
-		*old_limit = sz_pind2sz(pac->ecache_grow.limit);
+		*old_limit = sz_pind2sz(pac->geom_grow.limit);
 	}
 	if (new_limit != NULL) {
-		pac->ecache_grow.limit = new_ind;
+		pac->geom_grow.limit = new_ind;
 	}
-	malloc_mutex_unlock(tsdn, &pac->ecache_grow.mtx);
+	malloc_mutex_unlock(tsdn, &pac->geom_grow.mtx);
 
 	return false;
 }
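
For context on the last hunk: pac_retain_grow_limit_get_set() reads and caps the growth limit under geom_grow.mtx at runtime. Assuming the usual public knob is in play (the mallctl name "arena.<i>.retain_grow_limit" and any je_ symbol prefix depend on your jemalloc build and are not part of this diff), a caller might adjust it roughly like this:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	/* Read arena 0's current retain grow limit, then halve it. */
	size_t old_limit;
	size_t sz = sizeof(old_limit);
	if (mallctl("arena.0.retain_grow_limit", &old_limit, &sz, NULL, 0) != 0) {
		fprintf(stderr, "read failed\n");
		return 1;
	}
	size_t new_limit = old_limit / 2;
	if (mallctl("arena.0.retain_grow_limit", NULL, NULL, &new_limit,
	    sizeof(new_limit)) != 0) {
		fprintf(stderr, "write failed\n");
		return 1;
	}
	printf("retain_grow_limit: %zu -> %zu\n", old_limit, new_limit);
	return 0;
}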