Pass down the original edata on the expand path.
Instead of passing down new_addr, pass down the active edata, which allows us to always use neighbor-acquiring semantics: the callee now knows both the original edata and the neighbor address. With this change, only neighbors of a "known" edata can be acquired; acquiring an edata based on an arbitrary address is no longer possible.
parent 1784939688
commit 49b7d7f0a4
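
For orientation, here is a condensed sketch (not part of the diff) of how the call shape changes on the expand path; it mirrors the pac_expand_impl hunk at the end of this commit, with surrounding code and error handling elided:

/* Before: pass the raw trail address; the callee re-looks it up in the rtree. */
void *trail_begin = edata_past_get(edata);
edata_t *trail = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_dirty,
    trail_begin, expand_amount, PAGE, zero);

/*
 * After: pass the active edata itself; internally the forward neighbor is
 * acquired via emap_try_acquire_edata_neighbor_expand(), so only the
 * neighbor of a known edata can be obtained.
 */
edata_t *trail = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_dirty,
    edata, expand_amount, PAGE, zero);
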
@@ -25,7 +25,6 @@ enum extent_state_e {
    extent_state_muzzy = 2,
    extent_state_retained = 3,
    extent_state_transition = 4, /* States below are intermediate. */
    extent_state_updating = 4,
    extent_state_merging = 5,
    extent_state_max = 5 /* Sanity checking only. */
};

@@ -43,11 +43,26 @@ void emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
void emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
    extent_state_t state);

edata_t *emap_try_acquire_edata(tsdn_t *tsdn, emap_t *emap, void *addr,
    extent_state_t expected_state, bool allow_head_extent);
/*
 * The two acquire functions below allow accessing neighbor edatas, if it's safe
 * and valid to do so (i.e. from the same arena, of the same state, etc.). This
 * is necessary because the ecache locks are state based, and only protect
 * edatas with the same state. Therefore the neighbor edata's state needs to be
 * verified first, before chasing the edata pointer. The returned edata will be
 * in an acquired state, meaning other threads will be prevented from accessing
 * it, even if technically the edata can still be discovered from the rtree.
 *
 * This means, at any moment when holding pointers to edata, either one of the
 * state based locks is held (and the edatas are all of the protected state), or
 * the edatas are in an acquired state (e.g. in active or merging state). The
 * acquire operation itself (changing the edata to an acquired state) is done
 * under the state locks.
 */
edata_t *emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap,
    edata_t *edata, extent_pai_t pai, extent_state_t expected_state,
    bool forward);
edata_t *emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap,
    edata_t *edata, extent_pai_t pai, extent_state_t expected_state);
void emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
    extent_state_t new_state);
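
As an illustration (not part of the diff), the intended acquire/release pairing on the expand path looks roughly like the following; it condenses the extent_recycle_extract hunk further down, and assumes the caller holds the ecache mutex:

/* Try to acquire the forward neighbor of the (active) expand_edata. */
edata_t *neighbor = emap_try_acquire_edata_neighbor_expand(tsdn, pac->emap,
    expand_edata, EXTENT_PAI_PAC, ecache->state);
if (neighbor != NULL) {
    /* The neighbor is now in the merging (acquired) state. */
    extent_assert_can_expand(expand_edata, neighbor);
    if (edata_size_get(neighbor) < size) {
        /* Too small to satisfy the request; hand it back to the ecache. */
        emap_release_edata(tsdn, pac->emap, neighbor, ecache->state);
        neighbor = NULL;
    }
}
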
@@ -196,6 +211,17 @@ extent_assert_can_coalesce(const edata_t *inner, const edata_t *outer) {
    assert(edata_committed_get(inner) == edata_committed_get(outer));
    assert(edata_state_get(inner) == extent_state_active);
    assert(edata_state_get(outer) == extent_state_merging);
    assert(edata_base_get(inner) == edata_past_get(outer) ||
        edata_base_get(outer) == edata_past_get(inner));
}

JEMALLOC_ALWAYS_INLINE void
extent_assert_can_expand(const edata_t *original, const edata_t *expand) {
    assert(edata_arena_ind_get(original) == edata_arena_ind_get(expand));
    assert(edata_pai_get(original) == edata_pai_get(expand));
    assert(edata_state_get(original) == extent_state_active);
    assert(edata_state_get(expand) == extent_state_merging);
    assert(edata_past_get(original) == edata_base_get(expand));
}

JEMALLOC_ALWAYS_INLINE edata_t *

@@ -20,9 +20,11 @@
extern size_t opt_lg_extent_max_active_fit;

edata_t *ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    ecache_t *ecache, void *new_addr, size_t size, size_t alignment, bool zero);
    ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
    bool zero);
edata_t *ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    ecache_t *ecache, void *new_addr, size_t size, size_t alignment, bool zero);
    ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
    bool zero);
void ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    ecache_t *ecache, edata_t *edata);
edata_t *ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,

src/emap.c (84 changed lines)
@@ -70,7 +70,8 @@ edata_neighbor_head_state_mergeable(bool edata_is_head,

static inline bool
edata_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents,
    extent_pai_t pai, extent_state_t expected_state, bool forward) {
    extent_pai_t pai, extent_state_t expected_state, bool forward,
    bool expanding) {
    edata_t *neighbor = contents.edata;
    if (neighbor == NULL) {
        return false;

@@ -87,8 +88,8 @@ edata_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents,
        return false;
    }
    /* From this point, it's safe to access *neighbor. */
    if (edata_committed_get(edata) !=
        edata_committed_get(neighbor)) {
    if (!expanding && (edata_committed_get(edata) !=
        edata_committed_get(neighbor))) {
        /*
         * Some platforms (e.g. Windows) require an explicit
         * commit step (and writing to uncomitted memory is not

@@ -125,47 +126,13 @@ edata_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents,
    return true;
}

/* Will be removed in the next commit. */
edata_t *
emap_try_acquire_edata(tsdn_t *tsdn, emap_t *emap, void *addr,
    extent_state_t expected_state, bool allow_head_extent) {
    EMAP_DECLARE_RTREE_CTX;
    rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
        rtree_ctx, (uintptr_t)addr, false, false);
    if (elm == NULL) {
        return NULL;
    }
    rtree_contents_t contents = rtree_leaf_elm_read(tsdn, &emap->rtree, elm,
        /* dependent */ true);
    if (!allow_head_extent && contents.metadata.is_head) {
        /* !allow_head_extent indicates the expanding path. */
        return NULL;
    }

    edata_t *edata = contents.edata;
    if (edata == NULL || contents.metadata.state != expected_state) {
        return NULL;
    }
    assert(edata_state_get(edata) == expected_state);
    emap_update_edata_state(tsdn, emap, edata, extent_state_updating);

    return edata;
}

void
emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
    extent_state_t new_state) {
    assert(emap_edata_in_transition(tsdn, emap, edata));
    assert(emap_edata_is_acquired(tsdn, emap, edata));

    emap_update_edata_state(tsdn, emap, edata, new_state);
}

edata_t *
emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
    extent_pai_t pai, extent_state_t expected_state, bool forward) {
static inline edata_t *
emap_try_acquire_edata_neighbor_impl(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
    extent_pai_t pai, extent_state_t expected_state, bool forward,
    bool expanding) {
    witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE);
    assert(!expanding || forward);
    assert(!edata_state_in_transition(expected_state));
    assert(expected_state == extent_state_dirty ||
        expected_state == extent_state_muzzy ||

@@ -196,18 +163,47 @@ emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
    rtree_contents_t neighbor_contents = rtree_leaf_elm_read(tsdn,
        &emap->rtree, elm, /* dependent */ true);
    if (!edata_can_acquire_neighbor(edata, neighbor_contents, pai,
        expected_state, forward)) {
        expected_state, forward, expanding)) {
        return NULL;
    }

    /* From this point, the neighbor edata can be safely acquired. */
    edata_t *neighbor = neighbor_contents.edata;
    assert(edata_state_get(neighbor) == expected_state);
    emap_update_edata_state(tsdn, emap, neighbor, extent_state_merging);
    extent_assert_can_coalesce(edata, neighbor);
    if (expanding) {
        extent_assert_can_expand(edata, neighbor);
    } else {
        extent_assert_can_coalesce(edata, neighbor);
    }

    return neighbor;
}

edata_t *
emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
    extent_pai_t pai, extent_state_t expected_state, bool forward) {
    return emap_try_acquire_edata_neighbor_impl(tsdn, emap, edata, pai,
        expected_state, forward, /* expand */ false);
}

edata_t *
emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap,
    edata_t *edata, extent_pai_t pai, extent_state_t expected_state) {
    /* Try expanding forward. */
    return emap_try_acquire_edata_neighbor_impl(tsdn, emap, edata, pai,
        expected_state, /* forward */ true, /* expand */ true);
}

void
emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
    extent_state_t new_state) {
    assert(emap_edata_in_transition(tsdn, emap, edata));
    assert(emap_edata_is_acquired(tsdn, emap, edata));

    emap_update_edata_state(tsdn, emap, edata, new_state);
}

static bool
emap_rtree_leaf_elms_lookup(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
    const edata_t *edata, bool dependent, bool init_missing,

src/extent.c (88 changed lines)
@@ -36,15 +36,15 @@ static atomic_zu_t highpages;

static void extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata);
static edata_t *extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    ecache_t *ecache, void *new_addr, size_t usize, size_t alignment, bool zero,
    bool *commit, bool growing_retained);
    ecache_t *ecache, edata_t *expand_edata, size_t usize, size_t alignment,
    bool zero, bool *commit, bool growing_retained);
static edata_t *extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    ecache_t *ecache, edata_t *edata, bool *coalesced, bool growing_retained);
static void extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    ecache_t *ecache, edata_t *edata, bool growing_retained);
static edata_t *extent_alloc_retained(tsdn_t *tsdn, pac_t *pac,
    ehooks_t *ehooks, void *new_addr, size_t size, size_t alignment, bool zero,
    bool *commit);
    ehooks_t *ehooks, edata_t *expand_edata, size_t size, size_t alignment,
    bool zero, bool *commit);
static edata_t *extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    void *new_addr, size_t size, size_t alignment, bool zero, bool *commit);

@@ -80,14 +80,14 @@ extent_try_delayed_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,

edata_t *
ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
    void *new_addr, size_t size, size_t alignment, bool zero) {
    edata_t *expand_edata, size_t size, size_t alignment, bool zero) {
    assert(size != 0);
    assert(alignment != 0);
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    bool commit = true;
    edata_t *edata = extent_recycle(tsdn, pac, ehooks, ecache, new_addr,
    edata_t *edata = extent_recycle(tsdn, pac, ehooks, ecache, expand_edata,
        size, alignment, zero, &commit, false);
    assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
    return edata;

@@ -95,25 +95,27 @@ ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,

edata_t *
ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
    void *new_addr, size_t size, size_t alignment, bool zero) {
    edata_t *expand_edata, size_t size, size_t alignment, bool zero) {
    assert(size != 0);
    assert(alignment != 0);
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    bool commit = true;
    edata_t *edata = extent_alloc_retained(tsdn, pac, ehooks, new_addr,
    edata_t *edata = extent_alloc_retained(tsdn, pac, ehooks, expand_edata,
        size, alignment, zero, &commit);
    if (edata == NULL) {
        if (opt_retain && new_addr != NULL) {
        if (opt_retain && expand_edata != NULL) {
            /*
             * When retain is enabled and new_addr is set, we do not
             * attempt extent_alloc_wrapper which does mmap that is
             * very unlikely to succeed (unless it happens to be at
             * the end).
             * When retain is enabled and trying to expand, we do
             * not attempt extent_alloc_wrapper which does mmap that
             * is very unlikely to succeed (unless it happens to be
             * at the end).
             */
            return NULL;
        }
        void *new_addr = (expand_edata == NULL) ? NULL :
            edata_past_get(expand_edata);
        edata = extent_alloc_wrapper(tsdn, pac, ehooks, new_addr,
            size, alignment, zero, &commit);
    }
@@ -246,7 +248,7 @@ extent_activate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
    edata_t *edata) {
    assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache));
    assert(edata_state_get(edata) == ecache->state ||
        edata_state_get(edata) == extent_state_updating);
        edata_state_get(edata) == extent_state_merging);

    eset_remove(&ecache->eset, edata);
    emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);

@@ -354,37 +356,30 @@ extent_deregister_no_gdump_sub(tsdn_t *tsdn, pac_t *pac,
 */
static edata_t *
extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    ecache_t *ecache, void *new_addr, size_t size, size_t alignment,
    ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
    bool growing_retained) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);
    assert(alignment > 0);
    if (config_debug && new_addr != NULL) {
    if (config_debug && expand_edata != NULL) {
        /*
         * Non-NULL new_addr has two use cases:
         *
         * 1) Recycle a known-extant extent, e.g. during purging.
         * 2) Perform in-place expanding reallocation.
         *
         * Regardless of use case, new_addr must either refer to a
         * non-existing extent, or to the base of an extant extent,
         * since only active slabs support interior lookups (which of
         * course cannot be recycled).
         * Non-NULL expand_edata indicates in-place expanding realloc.
         * new_addr must either refer to a non-existing extent, or to
         * the base of an extant extent, since only active slabs support
         * interior lookups (which of course cannot be recycled).
         */
        void *new_addr = edata_past_get(expand_edata);
        assert(PAGE_ADDR2BASE(new_addr) == new_addr);
        assert(alignment <= PAGE);
    }

    malloc_mutex_lock(tsdn, &ecache->mtx);
    edata_t *edata;
    if (new_addr != NULL) {
        edata = emap_try_acquire_edata(tsdn, pac->emap, new_addr,
            ecache->state, /* allow_head_extent*/ false);
    if (expand_edata != NULL) {
        edata = emap_try_acquire_edata_neighbor_expand(tsdn, pac->emap,
            expand_edata, EXTENT_PAI_PAC, ecache->state);
        if (edata != NULL) {
            assert(edata_base_get(edata) == new_addr);
            assert(edata_arena_ind_get(edata) ==
                ecache_ind_get(ecache));
            assert(edata_state_get(edata) == extent_state_updating);
            extent_assert_can_expand(expand_edata, edata);
            if (edata_size_get(edata) < size) {
                emap_release_edata(tsdn, pac->emap, edata,
                    ecache->state);
@@ -454,10 +449,11 @@ extent_split_interior(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    edata_t **edata, edata_t **lead, edata_t **trail,
    /* The mess to clean up, in case of error. */
    edata_t **to_leak, edata_t **to_salvage,
    void *new_addr, size_t size, size_t alignment, bool growing_retained) {
    edata_t *expand_edata, size_t size, size_t alignment,
    bool growing_retained) {
    size_t leadsize = ALIGNMENT_CEILING((uintptr_t)edata_base_get(*edata),
        PAGE_CEILING(alignment)) - (uintptr_t)edata_base_get(*edata);
    assert(new_addr == NULL || leadsize == 0);
    assert(expand_edata == NULL || leadsize == 0);
    if (edata_size_get(*edata) < leadsize + size) {
        return extent_split_interior_cant_alloc;
    }

@@ -504,7 +500,7 @@ extent_split_interior(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 */
static edata_t *
extent_recycle_split(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    ecache_t *ecache, void *new_addr, size_t size, size_t alignment,
    ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
    edata_t *edata, bool growing_retained) {
    edata_t *lead;
    edata_t *trail;

@@ -513,7 +509,7 @@ extent_recycle_split(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,

    extent_split_interior_result_t result = extent_split_interior(
        tsdn, pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage,
        new_addr, size, alignment, growing_retained);
        expand_edata, size, alignment, growing_retained);

    if (!maps_coalesce && result != extent_split_interior_ok
        && !opt_retain) {

@@ -544,12 +540,9 @@ extent_recycle_split(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
            extent_deregister(tsdn, pac, to_salvage);
        }
        if (to_leak != NULL) {
            void *leak = edata_base_get(to_leak);
            extent_deregister_no_gdump_sub(tsdn, pac, to_leak);
            extents_abandon_vm(tsdn, pac, ehooks, ecache, to_leak,
                growing_retained);
            assert(emap_try_acquire_edata(tsdn, pac->emap,
                leak, ecache->state, true) == NULL);
        }
        return NULL;
    }

@@ -562,17 +555,17 @@ extent_recycle_split(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 */
static edata_t *
extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
    void *new_addr, size_t size, size_t alignment, bool zero, bool *commit,
    bool growing_retained) {
    edata_t *expand_edata, size_t size, size_t alignment, bool zero,
    bool *commit, bool growing_retained) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);
    edata_t *edata = extent_recycle_extract(tsdn, pac, ehooks, ecache,
        new_addr, size, alignment, growing_retained);
        expand_edata, size, alignment, growing_retained);
    if (edata == NULL) {
        return NULL;
    }

    edata = extent_recycle_split(tsdn, pac, ehooks, ecache, new_addr,
    edata = extent_recycle_split(tsdn, pac, ehooks, ecache, expand_edata,
        size, alignment, edata, growing_retained);
    if (edata == NULL) {
        return NULL;

@@ -742,21 +735,22 @@ label_err:

static edata_t *
extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    void *new_addr, size_t size, size_t alignment, bool zero, bool *commit) {
    edata_t *expand_edata, size_t size, size_t alignment, bool zero,
    bool *commit) {
    assert(size != 0);
    assert(alignment != 0);

    malloc_mutex_lock(tsdn, &pac->grow_mtx);

    edata_t *edata = extent_recycle(tsdn, pac, ehooks,
        &pac->ecache_retained, new_addr, size, alignment, zero,
        commit, /* growing_retained */ true);
        &pac->ecache_retained, expand_edata, size, alignment, zero, commit,
        /* growing_retained */ true);
    if (edata != NULL) {
        malloc_mutex_unlock(tsdn, &pac->grow_mtx);
        if (config_prof) {
            extent_gdump_add(tsdn, edata);
        }
    } else if (opt_retain && new_addr == NULL) {
    } else if (opt_retain && expand_edata == NULL) {
        edata = extent_grow_retained(tsdn, pac, ehooks, size,
            alignment, zero, commit);
        /* extent_grow_retained() always releases pac->grow_mtx. */
@@ -133,9 +133,7 @@ static bool
pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
    size_t new_size, bool zero) {
    pac_t *pac = (pac_t *)self;

    ehooks_t *ehooks = pac_ehooks_get(pac);
    void *trail_begin = edata_past_get(edata);

    size_t mapped_add = 0;
    size_t expand_amount = new_size - old_size;

@@ -144,14 +142,14 @@ pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
        return true;
    }
    edata_t *trail = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_dirty,
        trail_begin, expand_amount, PAGE, zero);
        edata, expand_amount, PAGE, zero);
    if (trail == NULL) {
        trail = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_muzzy,
            trail_begin, expand_amount, PAGE, zero);
            edata, expand_amount, PAGE, zero);
    }
    if (trail == NULL) {
        trail = ecache_alloc_grow(tsdn, pac, ehooks,
            &pac->ecache_retained, trail_begin, expand_amount, PAGE,
            &pac->ecache_retained, edata, expand_amount, PAGE,
            zero);
        mapped_add = expand_amount;
    }