Geom_grow: Don't keep the mutex internal.

We're about to use it in ways that will have external synchronization.
This commit is contained in:
David Goldblatt 2020-08-11 10:18:31 -07:00 committed by David Goldblatt
parent c57494879f
commit 5e90fd006e
6 changed files with 19 additions and 43 deletions

View File

@@ -13,12 +13,9 @@ struct geom_grow_s {
* retain_grow_limit is the max allowed size ind to expand (unless the * retain_grow_limit is the max allowed size ind to expand (unless the
* required size is greater). Default is no limit, and controlled * required size is greater). Default is no limit, and controlled
* through mallctl only. * through mallctl only.
*
* Synchronization: mtx
*/ */
pszind_t next; pszind_t next;
pszind_t limit; pszind_t limit;
malloc_mutex_t mtx;
}; };
static inline bool static inline bool
@@ -48,9 +45,6 @@ geom_grow_size_commit(geom_grow_t *geom_grow, pszind_t skip) {
} }
bool geom_grow_init(geom_grow_t *geom_grow); void geom_grow_init(geom_grow_t *geom_grow);
void geom_grow_prefork(tsdn_t *tsdn, geom_grow_t *geom_grow);
void geom_grow_postfork_parent(tsdn_t *tsdn, geom_grow_t *geom_grow);
void geom_grow_postfork_child(tsdn_t *tsdn, geom_grow_t *geom_grow);
#endif /* JEMALLOC_INTERNAL_ECACHE_GROW_H */ #endif /* JEMALLOC_INTERNAL_ECACHE_GROW_H */

View File

@@ -96,6 +96,7 @@ struct pac_s {
/* The grow info for the retained ecache. */ /* The grow info for the retained ecache. */
geom_grow_t geom_grow; geom_grow_t geom_grow;
malloc_mutex_t grow_mtx;
/* /*
* Decay-based purging state, responsible for scheduling extent state * Decay-based purging state, responsible for scheduling extent state

View File

@@ -614,7 +614,7 @@ extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
static edata_t * static edata_t *
extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
size_t size, size_t alignment, bool zero, bool *commit) { size_t size, size_t alignment, bool zero, bool *commit) {
malloc_mutex_assert_owner(tsdn, &pac->geom_grow.mtx); malloc_mutex_assert_owner(tsdn, &pac->grow_mtx);
size_t alloc_size_min = size + PAGE_CEILING(alignment) - PAGE; size_t alloc_size_min = size + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */ /* Beware size_t wrap-around. */
@@ -725,7 +725,7 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
*/ */
/* All opportunities for failure are past. */ /* All opportunities for failure are past. */
geom_grow_size_commit(&pac->geom_grow, geom_grow_skip); geom_grow_size_commit(&pac->geom_grow, geom_grow_skip);
malloc_mutex_unlock(tsdn, &pac->geom_grow.mtx); malloc_mutex_unlock(tsdn, &pac->grow_mtx);
if (config_prof) { if (config_prof) {
/* Adjust gdump stats now that extent is final size. */ /* Adjust gdump stats now that extent is final size. */
@@ -739,7 +739,7 @@ extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
return edata; return edata;
label_err: label_err:
malloc_mutex_unlock(tsdn, &pac->geom_grow.mtx); malloc_mutex_unlock(tsdn, &pac->grow_mtx);
return NULL; return NULL;
} }
@@ -749,24 +749,24 @@ extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
assert(size != 0); assert(size != 0);
assert(alignment != 0); assert(alignment != 0);
malloc_mutex_lock(tsdn, &pac->geom_grow.mtx); malloc_mutex_lock(tsdn, &pac->grow_mtx);
edata_t *edata = extent_recycle(tsdn, pac, ehooks, edata_t *edata = extent_recycle(tsdn, pac, ehooks,
&pac->ecache_retained, new_addr, size, alignment, zero, &pac->ecache_retained, new_addr, size, alignment, zero,
commit, /* growing_retained */ true); commit, /* growing_retained */ true);
if (edata != NULL) { if (edata != NULL) {
malloc_mutex_unlock(tsdn, &pac->geom_grow.mtx); malloc_mutex_unlock(tsdn, &pac->grow_mtx);
if (config_prof) { if (config_prof) {
extent_gdump_add(tsdn, edata); extent_gdump_add(tsdn, edata);
} }
} else if (opt_retain && new_addr == NULL) { } else if (opt_retain && new_addr == NULL) {
edata = extent_grow_retained(tsdn, pac, ehooks, size, edata = extent_grow_retained(tsdn, pac, ehooks, size,
alignment, zero, commit); alignment, zero, commit);
/* extent_grow_retained() always releases extent_grow_mtx. */ /* extent_grow_retained() always releases pac->grow_mtx. */
} else { } else {
malloc_mutex_unlock(tsdn, &pac->geom_grow.mtx); malloc_mutex_unlock(tsdn, &pac->grow_mtx);
} }
malloc_mutex_assert_not_owner(tsdn, &pac->geom_grow.mtx); malloc_mutex_assert_not_owner(tsdn, &pac->grow_mtx);
return edata; return edata;
} }

View File

@@ -1,29 +1,8 @@
#include "jemalloc/internal/jemalloc_preamble.h" #include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h" #include "jemalloc/internal/jemalloc_internal_includes.h"
bool void
geom_grow_init(geom_grow_t *geom_grow) { geom_grow_init(geom_grow_t *geom_grow) {
geom_grow->next = sz_psz2ind(HUGEPAGE); geom_grow->next = sz_psz2ind(HUGEPAGE);
geom_grow->limit = sz_psz2ind(SC_LARGE_MAXCLASS); geom_grow->limit = sz_psz2ind(SC_LARGE_MAXCLASS);
if (malloc_mutex_init(&geom_grow->mtx, "extent_grow",
WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
return true;
} }
return false;
}
void
geom_grow_prefork(tsdn_t *tsdn, geom_grow_t *geom_grow) {
malloc_mutex_prefork(tsdn, &geom_grow->mtx);
}
void
geom_grow_postfork_parent(tsdn_t *tsdn, geom_grow_t *geom_grow) {
malloc_mutex_postfork_parent(tsdn, &geom_grow->mtx);
}
void
geom_grow_postfork_child(tsdn_t *tsdn, geom_grow_t *geom_grow) {
malloc_mutex_postfork_child(tsdn, &geom_grow->mtx);
}

View File

@@ -16,7 +16,7 @@ pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard) {
void void
pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard) { pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard) {
geom_grow_prefork(tsdn, &shard->pac.geom_grow); malloc_mutex_prefork(tsdn, &shard->pac.grow_mtx);
} }
void void
@@ -37,7 +37,7 @@ pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard) {
ecache_postfork_parent(tsdn, &shard->pac.ecache_dirty); ecache_postfork_parent(tsdn, &shard->pac.ecache_dirty);
ecache_postfork_parent(tsdn, &shard->pac.ecache_muzzy); ecache_postfork_parent(tsdn, &shard->pac.ecache_muzzy);
ecache_postfork_parent(tsdn, &shard->pac.ecache_retained); ecache_postfork_parent(tsdn, &shard->pac.ecache_retained);
geom_grow_postfork_parent(tsdn, &shard->pac.geom_grow); malloc_mutex_postfork_parent(tsdn, &shard->pac.grow_mtx);
malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_dirty.mtx); malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_dirty.mtx);
malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_muzzy.mtx); malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_muzzy.mtx);
} }
@@ -48,7 +48,7 @@ pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard) {
ecache_postfork_child(tsdn, &shard->pac.ecache_dirty); ecache_postfork_child(tsdn, &shard->pac.ecache_dirty);
ecache_postfork_child(tsdn, &shard->pac.ecache_muzzy); ecache_postfork_child(tsdn, &shard->pac.ecache_muzzy);
ecache_postfork_child(tsdn, &shard->pac.ecache_retained); ecache_postfork_child(tsdn, &shard->pac.ecache_retained);
geom_grow_postfork_child(tsdn, &shard->pac.geom_grow); malloc_mutex_postfork_child(tsdn, &shard->pac.grow_mtx);
malloc_mutex_postfork_child(tsdn, &shard->pac.decay_dirty.mtx); malloc_mutex_postfork_child(tsdn, &shard->pac.decay_dirty.mtx);
malloc_mutex_postfork_child(tsdn, &shard->pac.decay_muzzy.mtx); malloc_mutex_postfork_child(tsdn, &shard->pac.decay_muzzy.mtx);
} }

View File

@@ -68,7 +68,9 @@ pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
ind, /* delay_coalesce */ false)) { ind, /* delay_coalesce */ false)) {
return true; return true;
} }
if (geom_grow_init(&pac->geom_grow)) { geom_grow_init(&pac->geom_grow);
if (malloc_mutex_init(&pac->grow_mtx, "extent_grow",
WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
return true; return true;
} }
if (decay_init(&pac->decay_dirty, cur_time, dirty_decay_ms)) { if (decay_init(&pac->decay_dirty, cur_time, dirty_decay_ms)) {
@@ -203,14 +205,14 @@ pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
} }
} }
malloc_mutex_lock(tsdn, &pac->geom_grow.mtx); malloc_mutex_lock(tsdn, &pac->grow_mtx);
if (old_limit != NULL) { if (old_limit != NULL) {
*old_limit = sz_pind2sz(pac->geom_grow.limit); *old_limit = sz_pind2sz(pac->geom_grow.limit);
} }
if (new_limit != NULL) { if (new_limit != NULL) {
pac->geom_grow.limit = new_ind; pac->geom_grow.limit = new_ind;
} }
malloc_mutex_unlock(tsdn, &pac->geom_grow.mtx); malloc_mutex_unlock(tsdn, &pac->grow_mtx);
return false; return false;
} }