#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/hpa.h"

#include "jemalloc/internal/flat_bitmap.h"
#include "jemalloc/internal/witness.h"
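
/*
 * How much address space we grab from the OS at a time when eden runs dry.
 * Eden is carved into HUGEPAGE-sized pageslabs on demand; with 2 MiB
 * hugepages, for example, this works out to 256 MiB of address space per
 * expansion.
 */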
#define HPA_EDEN_SIZE (128 * HUGEPAGE)

static edata_t *hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
    size_t alignment, bool zero);
static bool hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    size_t old_size, size_t new_size, bool zero);
static bool hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    size_t old_size, size_t new_size);
static void hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata);
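
/*
 * Reports whether the HPA can be used at all on this system/configuration.
 * hpa_shard_init asserts it, and malloc_conf processing is expected to
 * consult it before enabling any hpa_shard_t.
 */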
bool
hpa_supported() {
#ifdef _WIN32
	/*
	 * At least until the API and implementation are somewhat settled, we
	 * don't want to try to debug the VM subsystem on the hardest-to-test
	 * platform.
	 */
	return false;
#endif
	if (!pages_can_hugify) {
		return false;
	}
	/*
	 * We fundamentally rely on an address-space-hungry growth strategy for
	 * hugepages.
	 */
	if (LG_SIZEOF_PTR != 3) {
		return false;
	}
	/*
	 * If we couldn't detect the value of HUGEPAGE, HUGEPAGE_PAGES becomes
	 * this sentinel value -- see the comment in pages.h.
	 */
	if (HUGEPAGE_PAGES == 1) {
		return false;
	}
	return true;
}
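
/*
 * Initializes a shard.  Returns true on error (following the usual jemalloc
 * convention that true means failure); the only failure modes here are the
 * two mutex initializations.
 */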
bool
hpa_shard_init(hpa_shard_t *shard, emap_t *emap, base_t *base,
    edata_cache_t *edata_cache, unsigned ind, size_t alloc_max) {
	/* malloc_conf processing should have filtered out these cases. */
	assert(hpa_supported());
	bool err;
	err = malloc_mutex_init(&shard->grow_mtx, "hpa_shard_grow",
	    WITNESS_RANK_HPA_SHARD_GROW, malloc_mutex_rank_exclusive);
	if (err) {
		return true;
	}
	err = malloc_mutex_init(&shard->mtx, "hpa_shard",
	    WITNESS_RANK_HPA_SHARD, malloc_mutex_rank_exclusive);
	if (err) {
		return true;
	}

	assert(edata_cache != NULL);
	shard->base = base;
	edata_cache_small_init(&shard->ecs, edata_cache);
	psset_init(&shard->psset);
	shard->alloc_max = alloc_max;
	hpdata_list_init(&shard->unused_slabs);
	shard->age_counter = 0;
	shard->eden = NULL;
	shard->eden_len = 0;
	shard->ind = ind;
	shard->emap = emap;

	shard->stats.nevictions = 0;
	shard->stats.npurge_passes = 0;
	shard->stats.npurges = 0;
	shard->stats.nhugifies = 0;
	shard->stats.ndehugifies = 0;

	/*
	 * Fill these in last, so that if an hpa_shard gets used despite
	 * initialization failing, we'll at least crash instead of just
	 * operating on corrupted data.
	 */
	shard->pai.alloc = &hpa_alloc;
	shard->pai.expand = &hpa_expand;
	shard->pai.shrink = &hpa_shrink;
	shard->pai.dalloc = &hpa_dalloc;

	return false;
}

/*
 * Note that the stats functions here follow the usual stats naming
 * conventions; "merge" obtains the stats from some live object instance,
 * while "accum" only combines the stats from one stats object into another.
 * Hence the lack of locking here.
 */
static void
hpa_shard_nonderived_stats_accum(hpa_shard_nonderived_stats_t *dst,
    hpa_shard_nonderived_stats_t *src) {
	dst->nevictions += src->nevictions;
	dst->npurge_passes += src->npurge_passes;
	dst->npurges += src->npurges;
	dst->nhugifies += src->nhugifies;
	dst->ndehugifies += src->ndehugifies;
}

void
hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src) {
	psset_stats_accum(&dst->psset_stats, &src->psset_stats);
	hpa_shard_nonderived_stats_accum(&dst->nonderived_stats,
	    &src->nonderived_stats);
}
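
/*
 * "merge", by contrast, reads from a live shard, so it takes both shard
 * mutexes to get a consistent snapshot of the psset stats and counters.
 */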
void
hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,
    hpa_shard_stats_t *dst) {
	malloc_mutex_lock(tsdn, &shard->grow_mtx);
	malloc_mutex_lock(tsdn, &shard->mtx);
	psset_stats_accum(&dst->psset_stats, &shard->psset.stats);
	hpa_shard_nonderived_stats_accum(&dst->nonderived_stats, &shard->stats);
	malloc_mutex_unlock(tsdn, &shard->mtx);
	malloc_mutex_unlock(tsdn, &shard->grow_mtx);
}

static hpdata_t *
hpa_alloc_ps(tsdn_t *tsdn, hpa_shard_t *shard) {
	return (hpdata_t *)base_alloc(tsdn, shard->base, sizeof(hpdata_t),
	    CACHELINE);
}

static bool
hpa_good_hugification_candidate(hpa_shard_t *shard, hpdata_t *ps) {
	/*
	 * For now, just use a static check; hugify a pageslab if it's <= 5%
	 * inactive.  Eventually, this should be a malloc conf option.
	 */
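	/*
	 * For example, with 2 MiB hugepages and 4 KiB base pages
	 * (HUGEPAGE_PAGES == 512), this threshold is nactive >= 486 pages.
	 */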
	return hpdata_nactive_get(ps) >= (HUGEPAGE_PAGES) * 95 / 100;
}

static bool
hpa_should_hugify(hpa_shard_t *shard, hpdata_t *ps) {
	if (hpdata_changing_state_get(ps) || hpdata_huge_get(ps)) {
		return false;
	}
	return hpa_good_hugification_candidate(shard, ps);
}

/*
 * Whether or not the given pageslab meets the criteria for being purged (and,
 * if necessary, dehugified).
 */
static bool
hpa_should_purge(hpa_shard_t *shard, hpdata_t *ps) {
	/* As above, don't touch a pageslab that's already changing state. */
	if (hpdata_changing_state_get(ps)) {
		return false;
	}
	size_t purgeable = hpdata_ndirty_get(ps);
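	/*
	 * For example, with HUGEPAGE_PAGES == 512, this purges once more than
	 * 128 pages are dirty, or as soon as an empty slab has any dirty
	 * pages at all.
	 */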
	return purgeable > HUGEPAGE_PAGES * 25 / 100
	    || (purgeable > 0 && hpdata_empty(ps));
}
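
/*
 * Obtains a fresh pageslab for the shard, in order of preference: reuse an
 * evicted slab from unused_slabs, consume eden when exactly one hugepage is
 * left in it, or carve a hugepage off the front of eden (mapping a new eden
 * region first if necessary).  Called with grow_mtx held; returns NULL on
 * failure.
 */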
static hpdata_t *
hpa_grow(tsdn_t *tsdn, hpa_shard_t *shard) {
	malloc_mutex_assert_owner(tsdn, &shard->grow_mtx);
	hpdata_t *ps = NULL;

	/* Is there address space waiting for reuse? */
	malloc_mutex_assert_owner(tsdn, &shard->grow_mtx);
	ps = hpdata_list_first(&shard->unused_slabs);
	if (ps != NULL) {
		hpdata_list_remove(&shard->unused_slabs, ps);
		hpdata_age_set(ps, shard->age_counter++);
		return ps;
	}

	/* Is eden a perfect fit? */
	if (shard->eden != NULL && shard->eden_len == HUGEPAGE) {
		ps = hpa_alloc_ps(tsdn, shard);
		if (ps == NULL) {
			return NULL;
		}
		hpdata_init(ps, shard->eden, shard->age_counter++);
		shard->eden = NULL;
		shard->eden_len = 0;
		return ps;
	}

	/*
	 * We're about to try to allocate from eden by splitting.  If eden is
	 * NULL, we have to allocate it too.  Otherwise, we just have to
	 * allocate an hpdata_t for the new pageslab.
	 */
	if (shard->eden == NULL) {
		/*
		 * During development, we're primarily concerned with systems
		 * with overcommit.  Eventually, we should be more careful
		 * here.
		 */
		bool commit = true;
		/* Allocate address space, bailing if we fail. */
		void *new_eden = pages_map(NULL, HPA_EDEN_SIZE, HUGEPAGE,
		    &commit);
		if (new_eden == NULL) {
			return NULL;
		}
		ps = hpa_alloc_ps(tsdn, shard);
		if (ps == NULL) {
			pages_unmap(new_eden, HPA_EDEN_SIZE);
			return NULL;
		}
		shard->eden = new_eden;
		shard->eden_len = HPA_EDEN_SIZE;
	} else {
		/* Eden is already nonempty; only need an hpdata_t for ps. */
		ps = hpa_alloc_ps(tsdn, shard);
		if (ps == NULL) {
			return NULL;
		}
	}
	assert(ps != NULL);
	assert(shard->eden != NULL);
	assert(shard->eden_len > HUGEPAGE);
	assert(shard->eden_len % HUGEPAGE == 0);
	assert(HUGEPAGE_ADDR2BASE(shard->eden) == shard->eden);

	hpdata_init(ps, shard->eden, shard->age_counter++);

	char *eden_char = (char *)shard->eden;
	eden_char += HUGEPAGE;
	shard->eden = (void *)eden_char;
	shard->eden_len -= HUGEPAGE;

	return ps;
}

/*
 * As a precondition, ps should not be in the psset (we can handle deallocation
 * races, but not allocation ones), and we should hold the shard mutex.
 */
static void
hpa_purge(tsdn_t *tsdn, hpa_shard_t *shard, hpdata_t *ps) {
	malloc_mutex_assert_owner(tsdn, &shard->mtx);
	while (hpa_should_purge(shard, ps)) {
		/* Do the metadata update bit while holding the lock. */
		hpdata_purge_state_t purge_state;
		hpdata_purge_begin(ps, &purge_state);
		shard->stats.npurge_passes++;

		/*
		 * Dehugifying can only happen on the first loop iteration,
		 * since no other threads can allocate out of this ps while
		 * we're purging (and thus, can't hugify it), but there's not a
		 * natural way to express that in the control flow.
		 */
		bool needs_dehugify = false;
		if (hpdata_huge_get(ps)) {
			needs_dehugify = true;
			shard->stats.ndehugifies++;
			hpdata_dehugify(ps);
		}

		/* Drop the lock to do the OS calls. */
		malloc_mutex_unlock(tsdn, &shard->mtx);

		if (needs_dehugify) {
			pages_nohuge(hpdata_addr_get(ps), HUGEPAGE);
		}

		size_t total_purged = 0;
		uint64_t purges_this_pass = 0;
		void *purge_addr;
		size_t purge_size;
		while (hpdata_purge_next(ps, &purge_state, &purge_addr,
		    &purge_size)) {
			purges_this_pass++;
			pages_purge_forced(purge_addr, purge_size);
			total_purged += purge_size;
		}

		/* Reacquire to finish our metadata update. */
		malloc_mutex_lock(tsdn, &shard->mtx);
		shard->stats.npurges += purges_this_pass;
		hpdata_purge_end(ps, &purge_state);

		assert(total_purged <= HUGEPAGE);

		/*
		 * We're not done here; other threads can't allocate out of ps
		 * while purging, but they can still deallocate.  Those
		 * deallocations could have meant more purging than what we
		 * planned ought to happen.  We have to re-check now that we've
		 * reacquired the mutex.
		 */
	}
}

/*
 * Handles the metadata tracking associated with a page slab becoming empty.
 * The psset doesn't hold empty pageslabs, but we do want address space reuse,
 * so we track these pages outside the psset.
 */
static void
hpa_handle_ps_eviction(tsdn_t *tsdn, hpa_shard_t *shard, hpdata_t *ps) {
	/*
	 * We do relatively expensive system calls.  The ps was evicted, so no
	 * one should touch it while we're also touching it.
	 */
	malloc_mutex_assert_not_owner(tsdn, &shard->mtx);
	malloc_mutex_assert_not_owner(tsdn, &shard->grow_mtx);

	malloc_mutex_lock(tsdn, &shard->grow_mtx);
	shard->stats.nevictions++;
	hpdata_list_prepend(&shard->unused_slabs, ps);
	malloc_mutex_unlock(tsdn, &shard->grow_mtx);
}
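
/*
 * The fast path: try to satisfy an allocation from the pageslabs the shard
 * already owns, without growing.  Returns NULL on failure and uses *oom to
 * distinguish metadata exhaustion (an empty edata cache, or an emap
 * registration failure) from simply having no pageslab with enough room.
 */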
static edata_t *
hpa_try_alloc_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size, bool *oom) {
	bool err;
	malloc_mutex_lock(tsdn, &shard->mtx);
	edata_t *edata = edata_cache_small_get(tsdn, &shard->ecs);
	*oom = false;
	if (edata == NULL) {
		malloc_mutex_unlock(tsdn, &shard->mtx);
		*oom = true;
		return NULL;
	}
	assert(edata_arena_ind_get(edata) == shard->ind);

	hpdata_t *ps = psset_pick_alloc(&shard->psset, size);
	if (ps == NULL) {
		edata_cache_small_put(tsdn, &shard->ecs, edata);
		malloc_mutex_unlock(tsdn, &shard->mtx);
		return NULL;
	}

	psset_update_begin(&shard->psset, ps);
	void *addr = hpdata_reserve_alloc(ps, size);
	edata_init(edata, shard->ind, addr, size, /* slab */ false,
	    SC_NSIZES, /* sn */ 0, extent_state_active, /* zeroed */ false,
	    /* committed */ true, EXTENT_PAI_HPA, EXTENT_NOT_HEAD);
	edata_ps_set(edata, ps);

	/*
	 * This could theoretically be moved outside of the critical section,
	 * but that introduces the potential for a race.  Without the lock, the
	 * (initially nonempty, since this is the reuse pathway) pageslab we
	 * allocated out of could become otherwise empty while the lock is
	 * dropped.  This would force us to deal with a pageslab eviction down
	 * the error pathway, which is a pain.
	 */
	err = emap_register_boundary(tsdn, shard->emap, edata,
	    SC_NSIZES, /* slab */ false);
	if (err) {
		hpdata_unreserve(ps, edata_addr_get(edata),
		    edata_size_get(edata));
		/*
		 * We should arguably reset dirty state here, but this would
		 * require some sort of prepare + commit functionality that's a
		 * little much to deal with for now.
		 */
		psset_update_end(&shard->psset, ps);
		edata_cache_small_put(tsdn, &shard->ecs, edata);
		malloc_mutex_unlock(tsdn, &shard->mtx);
		*oom = true;
		return NULL;
	}

	bool hugify = hpa_should_hugify(shard, ps);
	if (hugify) {
		hpdata_hugify_begin(ps);
		shard->stats.nhugifies++;
	}
	psset_update_end(&shard->psset, ps);

	malloc_mutex_unlock(tsdn, &shard->mtx);
	if (hugify) {
		/*
		 * Hugifying with the lock dropped is safe, even with
		 * concurrent modifications to the ps.  This relies on
		 * the fact that the current implementation will never
		 * dehugify a non-empty pageslab, and ps will never
		 * become empty before we return edata to the user to be
		 * freed.
		 *
		 * Note that holding the lock would prevent not just operations
		 * on this page slab, but also any other alloc/dalloc
		 * operations in this hpa shard.
		 */
		bool err = pages_huge(hpdata_addr_get(ps), HUGEPAGE);
		/*
		 * Pretending we succeeded when we actually failed is safe;
		 * trying to roll back would be tricky, though.  Eat the error.
		 */
		(void)err;

		malloc_mutex_lock(tsdn, &shard->mtx);
		hpdata_hugify_end(ps);
		if (hpa_should_purge(shard, ps)) {
			/*
			 * There was a race in which the ps went from being
			 * almost full to having lots of free space while we
			 * hugified.  Undo our operation, taking care to meet
			 * the precondition that the ps isn't in the psset.
			 */
			psset_update_begin(&shard->psset, ps);
			hpa_purge(tsdn, shard, ps);
			psset_update_end(&shard->psset, ps);
		}
		malloc_mutex_unlock(tsdn, &shard->mtx);
	}
	return edata;
}
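
/*
 * The full allocation path: try the fast path first, and if nothing in the
 * psset can satisfy the request, grow the shard under grow_mtx (re-checking
 * for a racing grower before doing the expensive work).
 */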
static edata_t *
hpa_alloc_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size) {
	assert(size <= shard->alloc_max);
	bool err;
	bool oom;
	edata_t *edata;

	edata = hpa_try_alloc_no_grow(tsdn, shard, size, &oom);
	if (edata != NULL) {
		return edata;
	}

	/* Nothing in the psset works; we have to grow it. */
	malloc_mutex_lock(tsdn, &shard->grow_mtx);
	/*
	 * Check for grow races; maybe some earlier thread expanded the psset
	 * in between when we dropped the main mutex and grabbed the grow
	 * mutex.
	 */
	edata = hpa_try_alloc_no_grow(tsdn, shard, size, &oom);
	if (edata != NULL || oom) {
		malloc_mutex_unlock(tsdn, &shard->grow_mtx);
		return edata;
	}

	/*
	 * Note that we don't hold shard->mtx here (while growing);
	 * deallocations (and allocations of smaller sizes) may still succeed
	 * while we're doing this potentially expensive system call.
	 */
	hpdata_t *ps = hpa_grow(tsdn, shard);
	if (ps == NULL) {
		malloc_mutex_unlock(tsdn, &shard->grow_mtx);
		return NULL;
	}

	/* We got the new pageslab; allocate from it. */
	malloc_mutex_lock(tsdn, &shard->mtx);
	/*
	 * This will go away soon.  The psset doesn't draw a distinction
	 * between pageslab removal and updating.  If this is a new pageslab,
	 * we pretend that it's an old one that's been getting updated.
	 */
	if (!hpdata_updating_get(ps)) {
		hpdata_updating_set(ps, true);
	}

	edata = edata_cache_small_get(tsdn, &shard->ecs);
	if (edata == NULL) {
		shard->stats.nevictions++;
		malloc_mutex_unlock(tsdn, &shard->mtx);
		malloc_mutex_unlock(tsdn, &shard->grow_mtx);
		hpa_handle_ps_eviction(tsdn, shard, ps);
		return NULL;
	}

	void *addr = hpdata_reserve_alloc(ps, size);
	edata_init(edata, shard->ind, addr, size, /* slab */ false,
	    SC_NSIZES, /* sn */ 0, extent_state_active, /* zeroed */ false,
	    /* committed */ true, EXTENT_PAI_HPA, EXTENT_NOT_HEAD);
	edata_ps_set(edata, ps);

	err = emap_register_boundary(tsdn, shard->emap, edata,
	    SC_NSIZES, /* slab */ false);
	if (err) {
		hpdata_unreserve(ps, edata_addr_get(edata),
		    edata_size_get(edata));

		edata_cache_small_put(tsdn, &shard->ecs, edata);

		shard->stats.nevictions++;
		malloc_mutex_unlock(tsdn, &shard->mtx);
		malloc_mutex_unlock(tsdn, &shard->grow_mtx);

		/* We'll do a fake purge; the pages weren't really touched. */
		hpdata_purge_state_t purge_state;
		void *purge_addr;
		size_t purge_size;
		hpdata_purge_begin(ps, &purge_state);
		bool found_extent = hpdata_purge_next(ps, &purge_state,
		    &purge_addr, &purge_size);
		assert(found_extent);
		assert(purge_addr == addr);
		assert(purge_size == size);
		found_extent = hpdata_purge_next(ps, &purge_state,
		    &purge_addr, &purge_size);
		assert(!found_extent);
		hpdata_purge_end(ps, &purge_state);

		hpa_handle_ps_eviction(tsdn, shard, ps);
		return NULL;
	}
	psset_update_end(&shard->psset, ps);

	malloc_mutex_unlock(tsdn, &shard->mtx);
	malloc_mutex_unlock(tsdn, &shard->grow_mtx);
	return edata;
}
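
/*
 * Recovers the shard from its embedded page allocator interface.  The cast
 * assumes that the pai_t is the first member of hpa_shard_t.
 */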
static hpa_shard_t *
hpa_from_pai(pai_t *self) {
	assert(self->alloc == &hpa_alloc);
	assert(self->expand == &hpa_expand);
	assert(self->shrink == &hpa_shrink);
	assert(self->dalloc == &hpa_dalloc);
	return (hpa_shard_t *)self;
}

static edata_t *
hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
    size_t alignment, bool zero) {
	assert((size & PAGE_MASK) == 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	hpa_shard_t *shard = hpa_from_pai(self);
	/* We don't handle alignment or zeroing for now. */
	if (alignment > PAGE || zero) {
		return NULL;
	}
	if (size > shard->alloc_max) {
		return NULL;
	}

	edata_t *edata = hpa_alloc_psset(tsdn, shard, size);

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (edata != NULL) {
		emap_assert_mapped(tsdn, shard->emap, edata);
		assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
		assert(edata_state_get(edata) == extent_state_active);
		assert(edata_arena_ind_get(edata) == shard->ind);
		assert(edata_szind_get_maybe_invalid(edata) == SC_NSIZES);
		assert(!edata_slab_get(edata));
		assert(edata_committed_get(edata));
		assert(edata_base_get(edata) == edata_addr_get(edata));
		assert(edata_base_get(edata) != NULL);
	}
	return edata;
}

static bool
hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    size_t old_size, size_t new_size, bool zero) {
	/* Expand not yet supported. */
	return true;
}

static bool
hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
    size_t old_size, size_t new_size) {
	/* Shrink not yet supported. */
	return true;
}
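
/*
 * Deallocation: deregister the extent, return its metadata to the cache, and
 * give the freed range back to the pageslab, purging and/or evicting the slab
 * if it now crosses the relevant thresholds.
 */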
static void
hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata) {
	hpa_shard_t *shard = hpa_from_pai(self);

	edata_addr_set(edata, edata_base_get(edata));
	edata_zeroed_set(edata, false);

	assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
	assert(edata_state_get(edata) == extent_state_active);
	assert(edata_arena_ind_get(edata) == shard->ind);
	assert(edata_szind_get_maybe_invalid(edata) == SC_NSIZES);
	assert(!edata_slab_get(edata));
	assert(edata_committed_get(edata));
	assert(edata_base_get(edata) != NULL);

	hpdata_t *ps = edata_ps_get(edata);
	/* Currently, all edatas come from pageslabs. */
	assert(ps != NULL);
	emap_deregister_boundary(tsdn, shard->emap, edata);
	/*
	 * Note that the shard mutex protects ps's metadata too; it wouldn't be
	 * correct to try to read most information out of it without the lock.
	 */
	malloc_mutex_lock(tsdn, &shard->mtx);

	/*
	 * Release the metadata early, to avoid having to remember to do it
	 * while we're also doing tricky purging logic.
	 */
	void *unreserve_addr = edata_addr_get(edata);
	size_t unreserve_size = edata_size_get(edata);
	edata_cache_small_put(tsdn, &shard->ecs, edata);

	/*
	 * We have three rules interacting here:
	 * - You can't update ps metadata while it's still in the psset.  We
	 *   enforce this because it's necessary for stats tracking and
	 *   metadata management.
	 * - The ps must not be in the psset while purging.  This is because we
	 *   can't handle purge/alloc races.
	 * - Whoever removes the ps from the psset is the one to reinsert it
	 *   (or to pass it to hpa_handle_ps_eviction upon emptying).  This
	 *   keeps responsibility tracking simple.
	 */
	if (hpdata_mid_purge_get(ps)) {
		/*
		 * Another thread started purging, and so the ps is not in the
		 * psset and we can do our metadata update.  The other thread
		 * is in charge of reinserting the ps, so we're done.
		 */
		assert(hpdata_updating_get(ps));
		hpdata_unreserve(ps, unreserve_addr, unreserve_size);
		malloc_mutex_unlock(tsdn, &shard->mtx);
		return;
	}
	/*
	 * No other thread is purging, and the ps is non-empty, so it should be
	 * in the psset.
	 */
	assert(!hpdata_updating_get(ps));
	psset_update_begin(&shard->psset, ps);
	hpdata_unreserve(ps, unreserve_addr, unreserve_size);
	if (!hpa_should_purge(shard, ps)) {
		/*
		 * This should be the common case; no other thread is purging,
		 * and we won't purge either.
		 */
		psset_update_end(&shard->psset, ps);
		malloc_mutex_unlock(tsdn, &shard->mtx);
		return;
	}

	/* It's our job to purge. */
	hpa_purge(tsdn, shard, ps);

	/*
	 * OK, the hpdata is as purged as we want it to be, and it's going back
	 * into the psset (if nonempty) or getting evicted (if empty).
	 */
	if (hpdata_empty(ps)) {
		malloc_mutex_unlock(tsdn, &shard->mtx);
		hpa_handle_ps_eviction(tsdn, shard, ps);
	} else {
		psset_update_end(&shard->psset, ps);
		malloc_mutex_unlock(tsdn, &shard->mtx);
	}
}

void
hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard) {
	malloc_mutex_lock(tsdn, &shard->mtx);
	edata_cache_small_disable(tsdn, &shard->ecs);
	malloc_mutex_unlock(tsdn, &shard->mtx);
}

static void
hpa_shard_assert_stats_empty(psset_bin_stats_t *bin_stats) {
	assert(bin_stats->npageslabs == 0);
	assert(bin_stats->nactive == 0);
}

static void
hpa_assert_empty(tsdn_t *tsdn, hpa_shard_t *shard, psset_t *psset) {
	malloc_mutex_assert_owner(tsdn, &shard->mtx);
	hpdata_t *ps = psset_pick_alloc(psset, PAGE);
	assert(ps == NULL);
	for (int huge = 0; huge <= 1; huge++) {
		hpa_shard_assert_stats_empty(&psset->stats.full_slabs[huge]);
		for (pszind_t i = 0; i < PSSET_NPSIZES; i++) {
			hpa_shard_assert_stats_empty(
			    &psset->stats.nonfull_slabs[i][huge]);
		}
	}
}

void
hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard) {
	/*
	 * By the time we're here, the arena code should have dalloc'd all the
	 * active extents, which means we should have eventually evicted
	 * everything from the psset, so it shouldn't be able to serve even a
	 * 1-page allocation.
	 */
	if (config_debug) {
		malloc_mutex_lock(tsdn, &shard->mtx);
		hpa_assert_empty(tsdn, shard, &shard->psset);
		malloc_mutex_unlock(tsdn, &shard->mtx);
	}
	hpdata_t *ps;
	while ((ps = hpdata_list_first(&shard->unused_slabs)) != NULL) {
		hpdata_list_remove(&shard->unused_slabs, ps);
		pages_unmap(hpdata_addr_get(ps), HUGEPAGE);
	}
}
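
/*
 * Fork-safety hooks.  The grow mutex is preforked before the shard mutex,
 * presumably mirroring the grow_mtx-before-mtx acquisition order used above.
 */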
void
hpa_shard_prefork3(tsdn_t *tsdn, hpa_shard_t *shard) {
	malloc_mutex_prefork(tsdn, &shard->grow_mtx);
}

void
hpa_shard_prefork4(tsdn_t *tsdn, hpa_shard_t *shard) {
	malloc_mutex_prefork(tsdn, &shard->mtx);
}

void
hpa_shard_postfork_parent(tsdn_t *tsdn, hpa_shard_t *shard) {
	malloc_mutex_postfork_parent(tsdn, &shard->grow_mtx);
	malloc_mutex_postfork_parent(tsdn, &shard->mtx);
}

void
hpa_shard_postfork_child(tsdn_t *tsdn, hpa_shard_t *shard) {
	malloc_mutex_postfork_child(tsdn, &shard->grow_mtx);
	malloc_mutex_postfork_child(tsdn, &shard->mtx);
}