Implement retain on Windows.
The VirtualAlloc and VirtualFree APIs differ from mmap and munmap in that MEM_DECOMMIT cannot be applied across multiple VirtualAlloc regions. To properly support decommit, only allow merge / split within the same region; this is done by tracking the "is_head" state of extents and refusing to merge across region boundaries. Add a new state, is_head (only relevant for retain && !maps_coalesce), which is true for the first extent in each VirtualAlloc region. Decide whether two extents can be merged based on the head state, and use serial numbers for sanity checks.
parent f32f23d6cc
commit 9a86c65abc
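For illustration, here is a small self-contained C model of the merge rule described in the message above. The toy_* names and the standalone setup are hypothetical; the real check added by this commit is extent_head_no_merge() in the src/extent.c hunks below.

/*
 * Toy model of the merge rule (hypothetical toy_* names; the real check is
 * extent_head_no_merge() in src/extent.c below).
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct {
	size_t sn;     /* serial number; each VirtualAlloc region gets its own */
	bool is_head;  /* true for the first extent in its VirtualAlloc region */
} toy_extent_t;

/* Returns true if merging a (lower address) into b (higher address) is forbidden. */
static bool
toy_head_no_merge(bool maps_coalesce, bool opt_retain, const toy_extent_t *a,
    const toy_extent_t *b) {
	if (maps_coalesce) {
		return false;           /* mmap-style platforms always coalesce */
	}
	if (!opt_retain) {
		return true;            /* MEM_RELEASE requires whole regions */
	}
	if (b->is_head) {
		assert(a->sn != b->sn); /* distinct regions carry distinct sn */
		return true;            /* never merge across VirtualAlloc regions */
	}
	assert(a->sn == b->sn);         /* same region implies same sn */
	return false;
}

int
main(void) {
	toy_extent_t r1_head = {1, true}, r1_tail = {1, false}, r2_head = {2, true};
	assert(!toy_head_no_merge(false, true, &r1_head, &r1_tail)); /* same region: mergeable */
	assert(toy_head_no_merge(false, true, &r1_tail, &r2_head));  /* cross-region: forbidden */
	assert(toy_head_no_merge(false, false, &r1_head, &r1_tail)); /* no retain: forbidden */
	return 0;
}

The design point is that is_head marks VirtualAlloc region boundaries, so MEM_DECOMMIT-based purging never has to span two regions.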
@@ -343,10 +343,30 @@ extent_prof_alloc_time_set(extent_t *extent, nstime_t t) {
 	nstime_copy(&extent->e_alloc_time, &t);
 }
 
+static inline bool
+extent_is_head_get(extent_t *extent) {
+	if (maps_coalesce) {
+		not_reached();
+	}
+
+	return (bool)((extent->e_bits & EXTENT_BITS_IS_HEAD_MASK) >>
+	    EXTENT_BITS_IS_HEAD_SHIFT);
+}
+
+static inline void
+extent_is_head_set(extent_t *extent, bool is_head) {
+	if (maps_coalesce) {
+		not_reached();
+	}
+
+	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_IS_HEAD_MASK) |
+	    ((uint64_t)is_head << EXTENT_BITS_IS_HEAD_SHIFT);
+}
+
 static inline void
 extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
     bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
-    bool committed, bool dumpable) {
+    bool committed, bool dumpable, extent_head_state_t is_head) {
 	assert(addr == PAGE_ADDR2BASE(addr) || !slab);
 
 	extent_arena_set(extent, arena);
@@ -360,6 +380,10 @@ extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
 	extent_committed_set(extent, committed);
 	extent_dumpable_set(extent, dumpable);
 	ql_elm_new(extent, ql_link);
+	if (!maps_coalesce) {
+		extent_is_head_set(extent, (is_head == EXTENT_IS_HEAD) ? true :
+		    false);
+	}
 	if (config_prof) {
 		extent_prof_tctx_set(extent, NULL);
 	}
@@ -128,7 +128,11 @@ struct extent_s {
 #define EXTENT_BITS_BINSHARD_SHIFT  (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
 #define EXTENT_BITS_BINSHARD_MASK  MASK(EXTENT_BITS_BINSHARD_WIDTH, EXTENT_BITS_BINSHARD_SHIFT)
 
-#define EXTENT_BITS_SN_SHIFT  (EXTENT_BITS_BINSHARD_WIDTH + EXTENT_BITS_BINSHARD_SHIFT)
+#define EXTENT_BITS_IS_HEAD_WIDTH 1
+#define EXTENT_BITS_IS_HEAD_SHIFT  (EXTENT_BITS_BINSHARD_WIDTH + EXTENT_BITS_BINSHARD_SHIFT)
+#define EXTENT_BITS_IS_HEAD_MASK  MASK(EXTENT_BITS_IS_HEAD_WIDTH, EXTENT_BITS_IS_HEAD_SHIFT)
+
+#define EXTENT_BITS_SN_SHIFT  (EXTENT_BITS_IS_HEAD_WIDTH + EXTENT_BITS_IS_HEAD_SHIFT)
 #define EXTENT_BITS_SN_MASK  (UINT64_MAX << EXTENT_BITS_SN_SHIFT)
 
 /* Pointer to the extent that this structure is responsible for. */
@@ -15,4 +15,9 @@ typedef struct extent_util_stats_verbose_s extent_util_stats_verbose_t;
  */
 #define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6
 
+typedef enum {
+	EXTENT_NOT_HEAD,
+	EXTENT_IS_HEAD /* Only relevant for Windows && opt.retain. */
+} extent_head_state_t;
+
 #endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */
src/extent.c
@@ -50,20 +50,16 @@ static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
 static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
     size_t length, bool growing_retained);
-#ifdef JEMALLOC_MAPS_COALESCE
 static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
     size_t size, size_t size_a, size_t size_b, bool committed,
     unsigned arena_ind);
-#endif
 static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
     szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
     bool growing_retained);
-#ifdef JEMALLOC_MAPS_COALESCE
 static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
     size_t size_a, void *addr_b, size_t size_b, bool committed,
     unsigned arena_ind);
-#endif
 static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
     extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
     bool growing_retained);
@@ -88,11 +84,9 @@ const extent_hooks_t extent_hooks_default = {
 	,
 	NULL
 #endif
-#ifdef JEMALLOC_MAPS_COALESCE
 	,
 	extent_split_default,
 	extent_merge_default
-#endif
 };
 
 /* Used exclusively for gdump triggering. */
@@ -1323,7 +1317,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
 
 	extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES,
 	    arena_extent_sn_next(arena), extent_state_active, zeroed,
-	    committed, true);
+	    committed, true, EXTENT_IS_HEAD);
 	if (ptr == NULL) {
 		extent_dalloc(tsdn, arena, extent);
 		goto label_err;
@@ -1495,7 +1489,7 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
 	}
 	extent_init(extent, arena, addr, esize, slab, szind,
 	    arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
-	    true);
+	    true, EXTENT_NOT_HEAD);
 	if (pad != 0) {
 		extent_addr_randomize(tsdn, extent, alignment);
 	}
@@ -2045,13 +2039,20 @@ extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
 	    offset, length, false);
 }
 
-#ifdef JEMALLOC_MAPS_COALESCE
 static bool
 extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
     size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
-	return !maps_coalesce;
+	if (!maps_coalesce) {
+		/*
+		 * Without retain, only whole regions can be purged (required by
+		 * MEM_RELEASE on Windows) -- therefore disallow splitting. See
+		 * comments in extent_head_no_merge().
+		 */
+		return !opt_retain;
+	}
+
+	return false;
 }
-#endif
 
 /*
  * Accepts the extent to split, and the characteristics of each side of the
@@ -2083,7 +2084,8 @@ extent_split_impl(tsdn_t *tsdn, arena_t *arena,
 	extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
 	    size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
 	    extent_state_get(extent), extent_zeroed_get(extent),
-	    extent_committed_get(extent), extent_dumpable_get(extent));
+	    extent_committed_get(extent), extent_dumpable_get(extent),
+	    EXTENT_NOT_HEAD);
 
 	rtree_ctx_t rtree_ctx_fallback;
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
@@ -2094,7 +2096,8 @@ extent_split_impl(tsdn_t *tsdn, arena_t *arena,
 		extent_init(&lead, arena, extent_addr_get(extent), size_a,
 		    slab_a, szind_a, extent_sn_get(extent),
 		    extent_state_get(extent), extent_zeroed_get(extent),
-		    extent_committed_get(extent), extent_dumpable_get(extent));
+		    extent_committed_get(extent), extent_dumpable_get(extent),
+		    EXTENT_NOT_HEAD);
 
 		extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
 		    true, &lead_elm_a, &lead_elm_b);
@@ -2152,7 +2155,7 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
 
 static bool
 extent_merge_default_impl(void *addr_a, void *addr_b) {
-	if (!maps_coalesce) {
+	if (!maps_coalesce && !opt_retain) {
 		return true;
 	}
 	if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
@@ -2162,13 +2165,51 @@ extent_merge_default_impl(void *addr_a, void *addr_b) {
 	return false;
 }
 
-#ifdef JEMALLOC_MAPS_COALESCE
+/*
+ * Returns true if the given extents can't be merged because of their head bit
+ * settings. Assumes the second extent has the higher address.
+ */
+static bool
+extent_head_no_merge(extent_t *a, extent_t *b) {
+	assert(extent_base_get(a) < extent_base_get(b));
+	/*
+	 * When coalesce is not always allowed (Windows), only merge extents
+	 * from the same VirtualAlloc region under opt.retain (in which case
+	 * MEM_DECOMMIT is utilized for purging).
+	 */
+	if (maps_coalesce) {
+		return false;
+	}
+	if (!opt_retain) {
+		return true;
+	}
+	/* If b is a head extent, disallow the cross-region merge. */
+	if (extent_is_head_get(b)) {
+		/*
+		 * Additionally, sn should not overflow with retain; sanity
+		 * check that different regions have unique sn.
+		 */
+		assert(extent_sn_comp(a, b) != 0);
+		return true;
+	}
+	assert(extent_sn_comp(a, b) == 0);
+
+	return false;
+}
+
 static bool
 extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
     void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
+	if (!maps_coalesce) {
+		tsdn_t *tsdn = tsdn_fetch();
+		extent_t *a = iealloc(tsdn, addr_a);
+		extent_t *b = iealloc(tsdn, addr_b);
+		if (extent_head_no_merge(a, b)) {
+			return true;
+		}
+	}
 	return extent_merge_default_impl(addr_a, addr_b);
 }
-#endif
 
 static bool
 extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
@@ -2176,10 +2217,11 @@ extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
     bool growing_retained) {
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+	assert(extent_base_get(a) < extent_base_get(b));
 
 	extent_hooks_assure_initialized(arena, r_extent_hooks);
 
-	if ((*r_extent_hooks)->merge == NULL) {
+	if ((*r_extent_hooks)->merge == NULL || extent_head_no_merge(a, b)) {
 		return true;
 	}
 
@@ -156,7 +156,8 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 			extent_init(gap, arena, gap_addr_page,
 			    gap_size_page, false, SC_NSIZES,
 			    arena_extent_sn_next(arena),
 			    extent_state_active, false, true,
-			    true);
+			    true, EXTENT_NOT_HEAD);
 		}
 		/*
 		 * Compute the address just past the end of the desired
@@ -200,7 +201,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 				extent_init(&extent, arena, ret, size,
 				    size, false, SC_NSIZES,
 				    extent_state_active, false, true,
-				    true);
+				    true, EXTENT_NOT_HEAD);
 				if (extent_purge_forced_wrapper(tsdn,
 				    arena, &extent_hooks, &extent, 0,
 				    size)) {
@@ -279,8 +279,11 @@ extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size,
 	if (!try_dalloc) {
 		return true;
 	}
-	pages_unmap(addr, size);
 	did_dalloc = true;
+	if (!maps_coalesce && opt_retain) {
+		return true;
+	}
+	pages_unmap(addr, size);
 	return false;
 }
 
@@ -304,7 +307,9 @@ TEST_BEGIN(test_arena_destroy_hooks_unmap) {
 	unsigned nptrs;
 
 	extent_hooks_prep();
+	if (maps_coalesce) {
+		try_decommit = false;
+	}
 	memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t));
 	memcpy(&hooks, &hooks_unmap, sizeof(extent_hooks_t));
 
@@ -87,9 +87,9 @@ TEST_BEGIN(test_rtree_extrema) {
 	extent_t extent_a, extent_b;
 	extent_init(&extent_a, NULL, NULL, SC_LARGE_MINCLASS, false,
 	    sz_size2index(SC_LARGE_MINCLASS), 0,
-	    extent_state_active, false, false, true);
+	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);
 	extent_init(&extent_b, NULL, NULL, 0, false, SC_NSIZES, 0,
-	    extent_state_active, false, false, true);
+	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);
 
 	tsdn_t *tsdn = tsdn_fetch();
 
@@ -126,7 +126,7 @@ TEST_BEGIN(test_rtree_bits) {
 
 	extent_t extent;
 	extent_init(&extent, NULL, NULL, 0, false, SC_NSIZES, 0,
-	    extent_state_active, false, false, true);
+	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);
 
 	rtree_t *rtree = &test_rtree;
 	rtree_ctx_t rtree_ctx;
@@ -167,7 +167,7 @@ TEST_BEGIN(test_rtree_random) {
 
 	extent_t extent;
 	extent_init(&extent, NULL, NULL, 0, false, SC_NSIZES, 0,
-	    extent_state_active, false, false, true);
+	    extent_state_active, false, false, true, EXTENT_NOT_HEAD);
 
 	assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
 
@@ -9,7 +9,8 @@ TEST_BEGIN(test_arena_slab_regind) {
 	const bin_info_t *bin_info = &bin_infos[binind];
 	extent_init(&slab, NULL, mallocx(bin_info->slab_size,
 	    MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, true,
-	    binind, 0, extent_state_active, false, true, true);
+	    binind, 0, extent_state_active, false, true, true,
+	    EXTENT_NOT_HEAD);
 	assert_ptr_not_null(extent_addr_get(&slab),
 	    "Unexpected malloc() failure");
 	for (regind = 0; regind < bin_info->nregs; regind++) {