diff --git a/include/jemalloc/internal/geom_grow.h b/include/jemalloc/internal/geom_grow.h
index 128c56f9..ba83386f 100644
--- a/include/jemalloc/internal/geom_grow.h
+++ b/include/jemalloc/internal/geom_grow.h
@@ -13,12 +13,9 @@ struct geom_grow_s {
 	 * retain_grow_limit is the max allowed size ind to expand (unless the
 	 * required size is greater).  Default is no limit, and controlled
 	 * through mallctl only.
-	 *
-	 * Synchronization: mtx
 	 */
 	pszind_t next;
 	pszind_t limit;
-	malloc_mutex_t mtx;
 };
 
 static inline bool
@@ -48,9 +45,6 @@ geom_grow_size_commit(geom_grow_t *geom_grow, pszind_t skip) {
 
 }
 
-bool geom_grow_init(geom_grow_t *geom_grow);
-void geom_grow_prefork(tsdn_t *tsdn, geom_grow_t *geom_grow);
-void geom_grow_postfork_parent(tsdn_t *tsdn, geom_grow_t *geom_grow);
-void geom_grow_postfork_child(tsdn_t *tsdn, geom_grow_t *geom_grow);
+void geom_grow_init(geom_grow_t *geom_grow);
 
 #endif /* JEMALLOC_INTERNAL_ECACHE_GROW_H */
diff --git a/include/jemalloc/internal/pac.h b/include/jemalloc/internal/pac.h
index a028456c..614d34a5 100644
--- a/include/jemalloc/internal/pac.h
+++ b/include/jemalloc/internal/pac.h
@@ -96,6 +96,7 @@ struct pac_s {
 
 	/* The grow info for the retained ecache. */
 	geom_grow_t geom_grow;
+	malloc_mutex_t grow_mtx;
 
 	/*
 	 * Decay-based purging state, responsible for scheduling extent state
diff --git a/src/extent.c b/src/extent.c
index 6abaadf0..26a5c13f 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -614,7 +614,7 @@ extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
 static edata_t *
 extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
     size_t size, size_t alignment, bool zero, bool *commit) {
-	malloc_mutex_assert_owner(tsdn, &pac->geom_grow.mtx);
+	malloc_mutex_assert_owner(tsdn, &pac->grow_mtx);
 
 	size_t alloc_size_min = size + PAGE_CEILING(alignment) - PAGE;
 	/* Beware size_t wrap-around. */
@@ -725,7 +725,7 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 	 */
 	/* All opportunities for failure are past. */
 	geom_grow_size_commit(&pac->geom_grow, geom_grow_skip);
-	malloc_mutex_unlock(tsdn, &pac->geom_grow.mtx);
+	malloc_mutex_unlock(tsdn, &pac->grow_mtx);
 
 	if (config_prof) {
 		/* Adjust gdump stats now that extent is final size. */
@@ -739,7 +739,7 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 
 	return edata;
 label_err:
-	malloc_mutex_unlock(tsdn, &pac->geom_grow.mtx);
+	malloc_mutex_unlock(tsdn, &pac->grow_mtx);
 	return NULL;
 }
 
@@ -749,24 +749,24 @@ extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 	assert(size != 0);
 	assert(alignment != 0);
 
-	malloc_mutex_lock(tsdn, &pac->geom_grow.mtx);
+	malloc_mutex_lock(tsdn, &pac->grow_mtx);
 
 	edata_t *edata = extent_recycle(tsdn, pac, ehooks,
 	    &pac->ecache_retained, new_addr, size, alignment, zero, commit,
 	    /* growing_retained */ true);
 	if (edata != NULL) {
-		malloc_mutex_unlock(tsdn, &pac->geom_grow.mtx);
+		malloc_mutex_unlock(tsdn, &pac->grow_mtx);
 		if (config_prof) {
 			extent_gdump_add(tsdn, edata);
 		}
 	} else if (opt_retain && new_addr == NULL) {
 		edata = extent_grow_retained(tsdn, pac, ehooks, size,
 		    alignment, zero, commit);
-		/* extent_grow_retained() always releases extent_grow_mtx. */
+		/* extent_grow_retained() always releases pac->grow_mtx. */
 	} else {
-		malloc_mutex_unlock(tsdn, &pac->geom_grow.mtx);
+		malloc_mutex_unlock(tsdn, &pac->grow_mtx);
 	}
-	malloc_mutex_assert_not_owner(tsdn, &pac->geom_grow.mtx);
+	malloc_mutex_assert_not_owner(tsdn, &pac->grow_mtx);
 
 	return edata;
 }
diff --git a/src/geom_grow.c b/src/geom_grow.c
index eab8bc96..4816bb7f 100644
--- a/src/geom_grow.c
+++ b/src/geom_grow.c
@@ -1,29 +1,8 @@
 #include "jemalloc/internal/jemalloc_preamble.h"
 #include "jemalloc/internal/jemalloc_internal_includes.h"
 
-bool
+void
 geom_grow_init(geom_grow_t *geom_grow) {
 	geom_grow->next = sz_psz2ind(HUGEPAGE);
 	geom_grow->limit = sz_psz2ind(SC_LARGE_MAXCLASS);
-	if (malloc_mutex_init(&geom_grow->mtx, "extent_grow",
-	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
-		return true;
-	}
-	return false;
 }
-
-void
-geom_grow_prefork(tsdn_t *tsdn, geom_grow_t *geom_grow) {
-	malloc_mutex_prefork(tsdn, &geom_grow->mtx);
-}
-
-void
-geom_grow_postfork_parent(tsdn_t *tsdn, geom_grow_t *geom_grow) {
-	malloc_mutex_postfork_parent(tsdn, &geom_grow->mtx);
-}
-
-void
-geom_grow_postfork_child(tsdn_t *tsdn, geom_grow_t *geom_grow) {
-	malloc_mutex_postfork_child(tsdn, &geom_grow->mtx);
-}
-
diff --git a/src/pa_extra.c b/src/pa_extra.c
index 9e083cae..8bf54b96 100644
--- a/src/pa_extra.c
+++ b/src/pa_extra.c
@@ -16,7 +16,7 @@ pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard) {
 
 void
 pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard) {
-	geom_grow_prefork(tsdn, &shard->pac.geom_grow);
+	malloc_mutex_prefork(tsdn, &shard->pac.grow_mtx);
 }
 
 void
@@ -37,7 +37,7 @@ pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard) {
 	ecache_postfork_parent(tsdn, &shard->pac.ecache_dirty);
 	ecache_postfork_parent(tsdn, &shard->pac.ecache_muzzy);
 	ecache_postfork_parent(tsdn, &shard->pac.ecache_retained);
-	geom_grow_postfork_parent(tsdn, &shard->pac.geom_grow);
+	malloc_mutex_postfork_parent(tsdn, &shard->pac.grow_mtx);
 	malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_dirty.mtx);
 	malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_muzzy.mtx);
 }
@@ -48,7 +48,7 @@ pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard) {
 	ecache_postfork_child(tsdn, &shard->pac.ecache_dirty);
 	ecache_postfork_child(tsdn, &shard->pac.ecache_muzzy);
 	ecache_postfork_child(tsdn, &shard->pac.ecache_retained);
-	geom_grow_postfork_child(tsdn, &shard->pac.geom_grow);
+	malloc_mutex_postfork_child(tsdn, &shard->pac.grow_mtx);
 	malloc_mutex_postfork_child(tsdn, &shard->pac.decay_dirty.mtx);
 	malloc_mutex_postfork_child(tsdn, &shard->pac.decay_muzzy.mtx);
 }
diff --git a/src/pac.c b/src/pac.c
index 6d52a937..f50e82b0 100644
--- a/src/pac.c
+++ b/src/pac.c
@@ -68,7 +68,9 @@ pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
 	    ind, /* delay_coalesce */ false)) {
 		return true;
 	}
-	if (geom_grow_init(&pac->geom_grow)) {
+	geom_grow_init(&pac->geom_grow);
+	if (malloc_mutex_init(&pac->grow_mtx, "extent_grow",
+	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
 		return true;
 	}
 	if (decay_init(&pac->decay_dirty, cur_time, dirty_decay_ms)) {
@@ -203,14 +205,14 @@ pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
 		}
 	}
 
-	malloc_mutex_lock(tsdn, &pac->geom_grow.mtx);
+	malloc_mutex_lock(tsdn, &pac->grow_mtx);
 	if (old_limit != NULL) {
 		*old_limit = sz_pind2sz(pac->geom_grow.limit);
 	}
 	if (new_limit != NULL) {
 		pac->geom_grow.limit = new_ind;
 	}
-	malloc_mutex_unlock(tsdn, &pac->geom_grow.mtx);
+	malloc_mutex_unlock(tsdn, &pac->grow_mtx);
 
 	return false;
 }