Emap: Internal rtree context setting.

The only time sharing an rtree context across extent operations isn't a no-op is when tsd is unavailable. But this happens only in situations like thread death or initialization, and we don't care about shaving off every possible cycle in such scenarios.
This commit is contained in:
parent 08eb1e6c31
commit 1d449bd9a6
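The shape of the change is easiest to see at a call site. A minimal before/after sketch, based on the emap_register_boundary call in src/extent.c below:

    /* Before: each caller derived an rtree_ctx (with a fallback for the
     * tsd-unavailable case) and threaded it through every emap call. */
    rtree_ctx_t rtree_ctx_fallback;
    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
    emap_register_boundary(tsdn, &emap_global, rtree_ctx, edata, szind, slab);

    /* After: the emap function derives the context internally, via the
     * EMAP_DECLARE_RTREE_CTX macro added in src/emap.c below. */
    emap_register_boundary(tsdn, &emap_global, edata, szind, slab);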
include/jemalloc/internal/emap.h:

@@ -27,16 +27,16 @@ void emap_lock_edata2(tsdn_t *tsdn, emap_t *emap, edata_t *edata1,
     edata_t *edata2);
 void emap_unlock_edata2(tsdn_t *tsdn, emap_t *emap, edata_t *edata1,
     edata_t *edata2);
-edata_t *emap_lock_edata_from_addr(tsdn_t *tsdn, emap_t *emap,
-    rtree_ctx_t *rtree_ctx, void *addr, bool inactive_only);
+edata_t *emap_lock_edata_from_addr(tsdn_t *tsdn, emap_t *emap, void *addr,
+    bool inactive_only);
 
 /*
  * Associate the given edata with its beginning and end address, setting the
  * szind and slab info appropriately.
  * Returns true on error (i.e. resource exhaustion).
  */
-bool emap_register_boundary(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
-    edata_t *edata, szind_t szind, bool slab);
+bool emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
+    szind_t szind, bool slab);
 
 /*
  * Does the same thing, but with the interior of the range, for slab
@@ -57,13 +57,11 @@ bool emap_register_boundary(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
  * touched, so no allocation is necessary to fill the interior once the boundary
  * has been touched.
  */
-void emap_register_interior(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
-    edata_t *edata, szind_t szind);
+void emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
+    szind_t szind);
 
-void emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap,
-    rtree_ctx_t *rtree_ctx, edata_t *edata);
-void emap_deregister_interior(tsdn_t *tsdn, emap_t *emap,
-    rtree_ctx_t *rtree_ctx, edata_t *edata);
+void emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
+void emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
 
 typedef struct emap_prepare_s emap_prepare_t;
 struct emap_prepare_s {
@@ -74,9 +72,12 @@ struct emap_prepare_s {
 };
 
 /**
- * These functions do some of the metadata management for merging and splitting
- * extents. In particular, they set the mappings from addresses to edatas and
- * fill in lead and trail.
+ * These functions do some of the metadata management for merging, splitting,
+ * and reusing extents. In particular, they set the boundary mappings from
+ * addresses to edatas and fill in the szind, size, and slab values for the
+ * output edata (and, for splitting, *all* values for the trail). If the result
+ * is going to be used as a slab, you still need to call emap_register_interior
+ * on it, though.
  *
  * Each operation has a "prepare" and a "commit" portion. The prepare portion
  * does the operations that can be done without exclusive access to the extent
@@ -89,15 +90,26 @@ struct emap_prepare_s {
  * and esn values) data for the split variants, and can be reused for any
  * purpose by its given arena after a merge or a failed split.
  */
-bool emap_split_prepare(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
-    emap_prepare_t *prepare, edata_t *edata, size_t size_a, szind_t szind_a,
-    bool slab_a, edata_t *trail, size_t size_b, szind_t szind_b, bool slab_b);
+void emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, size_t size,
+    szind_t szind, bool slab);
+bool emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
+    edata_t *edata, size_t size_a, szind_t szind_a, bool slab_a, edata_t *trail,
+    size_t size_b, szind_t szind_b, bool slab_b);
 void emap_split_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
     edata_t *lead, size_t size_a, szind_t szind_a, bool slab_a, edata_t *trail,
     size_t size_b, szind_t szind_b, bool slab_b);
-void emap_merge_prepare(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
-    emap_prepare_t *prepare, edata_t *lead, edata_t *trail);
+void emap_merge_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
+    edata_t *lead, edata_t *trail);
 void emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
     edata_t *lead, edata_t *trail);
 
+/* Assert that the emap's view of the given edata matches the edata's view. */
+void emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
+static inline void
+emap_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
+    if (config_debug) {
+        emap_do_assert_mapped(tsdn, emap, edata);
+    }
+}
+
 #endif /* JEMALLOC_INTERNAL_EMAP_H */
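The prepare/commit comment above implies a two-phase call shape. A hedged sketch of a split under the new signatures (error handling and the exact locking discipline are simplified; the merge path in src/extent.c below follows the same prepare, lock, commit order):

    emap_prepare_t prepare;
    /* Phase 1: lookups that don't need exclusive access to the extents. */
    bool err = emap_split_prepare(tsdn, &emap_global, &prepare, edata,
        size_a, szind_a, slab_a, trail, size_b, szind_b, slab_b);
    if (!err) {
        /* Phase 2: flip the mappings while both edatas are locked. */
        emap_lock_edata2(tsdn, &emap_global, edata, trail);
        emap_split_commit(tsdn, &emap_global, &prepare, edata, size_a,
            szind_a, slab_a, trail, size_b, szind_b, slab_b);
        emap_unlock_edata2(tsdn, &emap_global, edata, trail);
    }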
src/emap.c (73 lines changed):
@@ -5,6 +5,15 @@
 
 emap_t emap_global;
 
+/*
+ * Note: Ends without a semicolon, so that
+ *     EMAP_DECLARE_RTREE_CTX;
+ * in uses will avoid empty-statement warnings.
+ */
+#define EMAP_DECLARE_RTREE_CTX \
+    rtree_ctx_t rtree_ctx_fallback; \
+    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback)
+
 enum emap_lock_result_e {
     emap_lock_result_success,
     emap_lock_result_failure,
@@ -89,8 +98,9 @@ emap_try_lock_rtree_leaf_elm(tsdn_t *tsdn, emap_t *emap, rtree_leaf_elm_t *elm,
  * address, and NULL otherwise.
  */
 edata_t *
-emap_lock_edata_from_addr(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
-    void *addr, bool inactive_only) {
+emap_lock_edata_from_addr(tsdn_t *tsdn, emap_t *emap, void *addr,
+    bool inactive_only) {
+    EMAP_DECLARE_RTREE_CTX;
     edata_t *ret = NULL;
     rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
         rtree_ctx, (uintptr_t)addr, false, false);
@@ -137,8 +147,10 @@ emap_rtree_write_acquired(tsdn_t *tsdn, emap_t *emap, rtree_leaf_elm_t *elm_a,
 }
 
 bool
-emap_register_boundary(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
-    edata_t *edata, szind_t szind, bool slab) {
+emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
+    szind_t szind, bool slab) {
+    EMAP_DECLARE_RTREE_CTX;
+
     rtree_leaf_elm_t *elm_a, *elm_b;
     bool err = emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, edata,
         false, true, &elm_a, &elm_b);
@@ -150,8 +162,10 @@ emap_register_boundary(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
 }
 
 void
-emap_register_interior(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
-    edata_t *edata, szind_t szind) {
+emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
+    szind_t szind) {
+    EMAP_DECLARE_RTREE_CTX;
+
     assert(edata_slab_get(edata));
 
     /* Register interior. */
@@ -163,8 +177,8 @@ emap_register_interior(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
 }
 
 void
-emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
-    edata_t *edata) {
+emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
+    EMAP_DECLARE_RTREE_CTX;
     rtree_leaf_elm_t *elm_a, *elm_b;
 
     emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, edata,
@@ -174,8 +188,9 @@ emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
 }
 
 void
-emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
-    edata_t *edata) {
+emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
+    EMAP_DECLARE_RTREE_CTX;
+
     assert(edata_slab_get(edata));
     for (size_t i = 1; i < (edata_size_get(edata) >> LG_PAGE) - 1; i++) {
         rtree_clear(tsdn, &emap->rtree, rtree_ctx,
@@ -184,10 +199,29 @@ emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
     }
 }
 
+void emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, size_t size,
+    szind_t szind, bool slab) {
+    EMAP_DECLARE_RTREE_CTX;
+
+    edata_szind_set(edata, szind);
+    if (szind != SC_NSIZES) {
+        rtree_szind_slab_update(tsdn, &emap->rtree, rtree_ctx,
+            (uintptr_t)edata_addr_get(edata), szind, slab);
+        if (slab && edata_size_get(edata) > PAGE) {
+            rtree_szind_slab_update(tsdn,
+                &emap->rtree, rtree_ctx,
+                (uintptr_t)edata_past_get(edata) - (uintptr_t)PAGE,
+                szind, slab);
+        }
+    }
+
+}
+
 bool
-emap_split_prepare(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
-    emap_prepare_t *prepare, edata_t *edata, size_t size_a, szind_t szind_a,
-    bool slab_a, edata_t *trail, size_t size_b, szind_t szind_b, bool slab_b) {
+emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
+    edata_t *edata, size_t size_a, szind_t szind_a, bool slab_a, edata_t *trail,
+    size_t size_b, szind_t szind_b, bool slab_b) {
+    EMAP_DECLARE_RTREE_CTX;
     /*
      * Note that while the trail mostly inherits its attributes from the
      * extent to be split, it maintains its own arena ind -- this allows
@@ -234,8 +268,9 @@ emap_split_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
 }
 
 void
-emap_merge_prepare(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
-    emap_prepare_t *prepare, edata_t *lead, edata_t *trail) {
+emap_merge_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
+    edata_t *lead, edata_t *trail) {
+    EMAP_DECLARE_RTREE_CTX;
     emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, lead, true, false,
         &prepare->lead_elm_a, &prepare->lead_elm_b);
     emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, trail, true, false,
@@ -269,3 +304,11 @@ emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
     emap_rtree_write_acquired(tsdn, emap, prepare->lead_elm_a, merged_b,
         lead, SC_NSIZES, false);
 }
+
+void
+emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
+    EMAP_DECLARE_RTREE_CTX;
+
+    assert(rtree_edata_read(tsdn, &emap->rtree, rtree_ctx,
+        (uintptr_t)edata_base_get(edata), true) == edata);
+}
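Every converted entry point in src/emap.c now opens with the macro and uses the resulting rtree_ctx locally. A sketch of how a new lookup helper would adopt the same pattern; emap_example_lookup is hypothetical, while EMAP_DECLARE_RTREE_CTX and rtree_edata_read are used the same way as in emap_do_assert_mapped above:

    edata_t *
    emap_example_lookup(tsdn_t *tsdn, emap_t *emap, void *addr) {
        /* Declares rtree_ctx_fallback and rtree_ctx; the fallback path
         * only runs when tsd is unavailable. */
        EMAP_DECLARE_RTREE_CTX;

        /* Hypothetical helper: a non-dependent read may return NULL. */
        return rtree_edata_read(tsdn, &emap->rtree, rtree_ctx,
            (uintptr_t)addr, false);
    }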
src/extent.c (153 lines changed):
@@ -43,8 +43,8 @@ static edata_t *extent_recycle(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     ecache_t *ecache, void *new_addr, size_t usize, size_t pad, size_t alignment,
     bool slab, szind_t szind, bool *zero, bool *commit, bool growing_retained);
 static edata_t *extent_try_coalesce(tsdn_t *tsdn, edata_cache_t *edata_cache,
-    ehooks_t *ehooks, rtree_ctx_t *rtree_ctx, ecache_t *ecache, edata_t *edata,
-    bool *coalesced, bool growing_retained);
+    ehooks_t *ehooks, ecache_t *ecache, edata_t *edata, bool *coalesced,
+    bool growing_retained);
 static void extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *edata, bool growing_retained);
 static edata_t *extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
@@ -81,12 +81,11 @@ extent_addr_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
 
 static bool
 extent_try_delayed_coalesce(tsdn_t *tsdn, edata_cache_t *edata_cache,
-    ehooks_t *ehooks, rtree_ctx_t *rtree_ctx, ecache_t *ecache,
-    edata_t *edata) {
+    ehooks_t *ehooks, ecache_t *ecache, edata_t *edata) {
     edata_state_set(edata, extent_state_active);
     bool coalesced;
-    edata = extent_try_coalesce(tsdn, edata_cache, ehooks, rtree_ctx,
-        ecache, edata, &coalesced, false);
+    edata = extent_try_coalesce(tsdn, edata_cache, ehooks, ecache, edata,
+        &coalesced, false);
     edata_state_set(edata, ecache->state);
 
     if (!coalesced) {
@@ -160,9 +159,6 @@ ecache_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
 edata_t *
 ecache_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
     size_t npages_min) {
-    rtree_ctx_t rtree_ctx_fallback;
-    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
     malloc_mutex_lock(tsdn, &ecache->mtx);
 
     /*
@@ -188,7 +184,7 @@ ecache_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
         }
         /* Try to coalesce. */
         if (extent_try_delayed_coalesce(tsdn, &arena->edata_cache,
-            ehooks, rtree_ctx, ecache, edata)) {
+            ehooks, ecache, edata)) {
             break;
         }
         /*
@@ -309,9 +305,6 @@ extent_gdump_sub(tsdn_t *tsdn, const edata_t *edata) {
 
 static bool
 extent_register_impl(tsdn_t *tsdn, edata_t *edata, bool gdump_add) {
-    rtree_ctx_t rtree_ctx_fallback;
-    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
     /*
      * We need to hold the lock to protect against a concurrent coalesce
      * operation that sees us in a partial state.
@@ -321,15 +314,13 @@ extent_register_impl(tsdn_t *tsdn, edata_t *edata, bool gdump_add) {
     szind_t szind = edata_szind_get_maybe_invalid(edata);
     bool slab = edata_slab_get(edata);
 
-    if (emap_register_boundary(tsdn, &emap_global, rtree_ctx, edata, szind,
-        slab)) {
+    if (emap_register_boundary(tsdn, &emap_global, edata, szind, slab)) {
         emap_unlock_edata(tsdn, &emap_global, edata);
         return true;
     }
 
     if (slab) {
-        emap_register_interior(tsdn, &emap_global, rtree_ctx, edata,
-            szind);
+        emap_register_interior(tsdn, &emap_global, edata, szind);
     }
 
     emap_unlock_edata(tsdn, &emap_global, edata);
@@ -362,13 +353,10 @@ extent_reregister(tsdn_t *tsdn, edata_t *edata) {
  */
 static void
 extent_deregister_impl(tsdn_t *tsdn, edata_t *edata, bool gdump) {
-    rtree_ctx_t rtree_ctx_fallback;
-    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
     emap_lock_edata(tsdn, &emap_global, edata);
-    emap_deregister_boundary(tsdn, &emap_global, rtree_ctx, edata);
+    emap_deregister_boundary(tsdn, &emap_global, edata);
     if (edata_slab_get(edata)) {
-        emap_deregister_interior(tsdn, &emap_global, rtree_ctx, edata);
+        emap_deregister_interior(tsdn, &emap_global, edata);
         edata_slab_set(edata, false);
     }
     emap_unlock_edata(tsdn, &emap_global, edata);
@@ -394,8 +382,8 @@ extent_deregister_no_gdump_sub(tsdn_t *tsdn, edata_t *edata) {
  */
 static edata_t *
 extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    rtree_ctx_t *rtree_ctx, ecache_t *ecache, void *new_addr, size_t size,
-    size_t pad, size_t alignment, bool slab, bool growing_retained) {
+    ecache_t *ecache, void *new_addr, size_t size, size_t pad, size_t alignment,
+    bool slab, bool growing_retained) {
     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
         WITNESS_RANK_CORE, growing_retained ? 1 : 0);
     assert(alignment > 0);
@@ -420,8 +408,8 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     malloc_mutex_lock(tsdn, &ecache->mtx);
     edata_t *edata;
     if (new_addr != NULL) {
-        edata = emap_lock_edata_from_addr(tsdn, &emap_global, rtree_ctx,
-            new_addr, false);
+        edata = emap_lock_edata_from_addr(tsdn, &emap_global, new_addr,
+            false);
         if (edata != NULL) {
             /*
              * We might null-out edata to report an error, but we
@@ -480,7 +468,6 @@ typedef enum {
 
 static extent_split_interior_result_t
 extent_split_interior(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    rtree_ctx_t *rtree_ctx,
     /* The result of splitting, in case of success. */
     edata_t **edata, edata_t **lead, edata_t **trail,
     /* The mess to clean up, in case of error. */
@@ -529,22 +516,7 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     }
 
     if (leadsize == 0 && trailsize == 0) {
-        /*
-         * Splitting causes szind to be set as a side effect, but no
-         * splitting occurred.
-         */
-        edata_szind_set(*edata, szind);
-        if (szind != SC_NSIZES) {
-            rtree_szind_slab_update(tsdn, &emap_global.rtree,
-                rtree_ctx, (uintptr_t)edata_addr_get(*edata), szind,
-                slab);
-            if (slab && edata_size_get(*edata) > PAGE) {
-                rtree_szind_slab_update(tsdn,
-                    &emap_global.rtree, rtree_ctx,
-                    (uintptr_t)edata_past_get(*edata) -
-                    (uintptr_t)PAGE, szind, slab);
-            }
-        }
+        emap_remap(tsdn, &emap_global, *edata, size, szind, slab);
     }
 
     return extent_split_interior_ok;
@@ -558,18 +530,16 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
  */
 static edata_t *
 extent_recycle_split(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    rtree_ctx_t *rtree_ctx, ecache_t *ecache, void *new_addr, size_t size,
-    size_t pad, size_t alignment, bool slab, szind_t szind, edata_t *edata,
-    bool growing_retained) {
+    ecache_t *ecache, void *new_addr, size_t size, size_t pad, size_t alignment,
+    bool slab, szind_t szind, edata_t *edata, bool growing_retained) {
     edata_t *lead;
     edata_t *trail;
     edata_t *to_leak;
     edata_t *to_salvage;
 
     extent_split_interior_result_t result = extent_split_interior(
-        tsdn, arena, ehooks, rtree_ctx, &edata, &lead, &trail, &to_leak,
-        &to_salvage, new_addr, size, pad, alignment, slab, szind,
-        growing_retained);
+        tsdn, arena, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage,
+        new_addr, size, pad, alignment, slab, szind, growing_retained);
 
     if (!maps_coalesce && result != extent_split_interior_ok
         && !opt_retain) {
@@ -605,7 +575,7 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
             extents_abandon_vm(tsdn, arena, ehooks, ecache, to_leak,
                 growing_retained);
             assert(emap_lock_edata_from_addr(tsdn, &emap_global,
-                rtree_ctx, leak, false) == NULL);
+                leak, false) == NULL);
         }
         return NULL;
     }
@@ -626,19 +596,14 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
     assert(pad == 0 || !slab);
     assert(!*zero || !slab);
 
-    rtree_ctx_t rtree_ctx_fallback;
-    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
-    edata_t *edata = extent_recycle_extract(tsdn, arena, ehooks,
-        rtree_ctx, ecache, new_addr, size, pad, alignment, slab,
-        growing_retained);
+    edata_t *edata = extent_recycle_extract(tsdn, arena, ehooks, ecache,
+        new_addr, size, pad, alignment, slab, growing_retained);
     if (edata == NULL) {
         return NULL;
     }
 
-    edata = extent_recycle_split(tsdn, arena, ehooks, rtree_ctx, ecache,
-        new_addr, size, pad, alignment, slab, szind, edata,
-        growing_retained);
+    edata = extent_recycle_split(tsdn, arena, ehooks, ecache, new_addr,
+        size, pad, alignment, slab, szind, edata, growing_retained);
     if (edata == NULL) {
         return NULL;
     }
@@ -665,8 +630,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
     assert(edata_state_get(edata) == extent_state_active);
     if (slab) {
         edata_slab_set(edata, slab);
-        emap_register_interior(tsdn, &emap_global, rtree_ctx, edata,
-            szind);
+        emap_register_interior(tsdn, &emap_global, edata, szind);
     }
 
     if (*zero) {
@@ -724,14 +688,15 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     void *ptr = ehooks_alloc(tsdn, ehooks, NULL, alloc_size, PAGE, &zeroed,
         &committed);
 
-    edata_init(edata, arena_ind_get(arena), ptr, alloc_size, false,
-        SC_NSIZES, arena_extent_sn_next(arena), extent_state_active, zeroed,
-        committed, true, EXTENT_IS_HEAD);
     if (ptr == NULL) {
         edata_cache_put(tsdn, &arena->edata_cache, edata);
         goto label_err;
     }
 
+    edata_init(edata, arena_ind_get(arena), ptr, alloc_size, false,
+        SC_NSIZES, arena_extent_sn_next(arena), extent_state_active, zeroed,
+        committed, true, EXTENT_IS_HEAD);
+
     if (extent_register_no_gdump_add(tsdn, edata)) {
         edata_cache_put(tsdn, &arena->edata_cache, edata);
         goto label_err;
@@ -744,15 +709,13 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
         *commit = true;
     }
 
-    rtree_ctx_t rtree_ctx_fallback;
-    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
     edata_t *lead;
     edata_t *trail;
     edata_t *to_leak;
     edata_t *to_salvage;
 
     extent_split_interior_result_t result = extent_split_interior(tsdn,
-        arena, ehooks, rtree_ctx, &edata, &lead, &trail, &to_leak,
+        arena, ehooks, &edata, &lead, &trail, &to_leak,
         &to_salvage, NULL, size, pad, alignment, slab, szind, true);
 
     if (result == extent_split_interior_ok) {
@@ -824,13 +787,8 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
         extent_addr_randomize(tsdn, arena, edata, alignment);
     }
     if (slab) {
-        rtree_ctx_t rtree_ctx_fallback;
-        rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
-            &rtree_ctx_fallback);
-
         edata_slab_set(edata, true);
-        emap_register_interior(tsdn, &emap_global, rtree_ctx, edata,
-            szind);
+        emap_register_interior(tsdn, &emap_global, edata, szind);
     }
     if (*zero && !edata_zeroed_get(edata)) {
         void *addr = edata_base_get(edata);
@@ -949,8 +907,8 @@ extent_coalesce(tsdn_t *tsdn, edata_cache_t *edata_cache, ehooks_t *ehooks,
 
 static edata_t *
 extent_try_coalesce_impl(tsdn_t *tsdn, edata_cache_t *edata_cache,
-    ehooks_t *ehooks, rtree_ctx_t *rtree_ctx, ecache_t *ecache, edata_t *edata,
-    bool *coalesced, bool growing_retained, bool inactive_only) {
+    ehooks_t *ehooks, ecache_t *ecache, edata_t *edata, bool *coalesced,
+    bool growing_retained, bool inactive_only) {
     /*
      * We avoid checking / locking inactive neighbors for large size
      * classes, since they are eagerly coalesced on deallocation which can
@@ -966,7 +924,7 @@ extent_try_coalesce_impl(tsdn_t *tsdn, edata_cache_t *edata_cache,
 
         /* Try to coalesce forward. */
         edata_t *next = emap_lock_edata_from_addr(tsdn, &emap_global,
-            rtree_ctx, edata_past_get(edata), inactive_only);
+            edata_past_get(edata), inactive_only);
         if (next != NULL) {
             /*
              * ecache->mtx only protects against races for
@@ -992,7 +950,7 @@ extent_try_coalesce_impl(tsdn_t *tsdn, edata_cache_t *edata_cache,
 
         /* Try to coalesce backward. */
         edata_t *prev = emap_lock_edata_from_addr(tsdn, &emap_global,
-            rtree_ctx, edata_before_get(edata), inactive_only);
+            edata_before_get(edata), inactive_only);
         if (prev != NULL) {
             bool can_coalesce = extent_can_coalesce(ecache, edata,
                 prev);
@@ -1020,18 +978,17 @@ extent_try_coalesce_impl(tsdn_t *tsdn, edata_cache_t *edata_cache,
 
 static edata_t *
 extent_try_coalesce(tsdn_t *tsdn, edata_cache_t *edata_cache, ehooks_t *ehooks,
-    rtree_ctx_t *rtree_ctx, ecache_t *ecache, edata_t *edata, bool *coalesced,
-    bool growing_retained) {
-    return extent_try_coalesce_impl(tsdn, edata_cache, ehooks, rtree_ctx,
-        ecache, edata, coalesced, growing_retained, false);
+    ecache_t *ecache, edata_t *edata, bool *coalesced, bool growing_retained) {
+    return extent_try_coalesce_impl(tsdn, edata_cache, ehooks, ecache,
+        edata, coalesced, growing_retained, false);
 }
 
 static edata_t *
 extent_try_coalesce_large(tsdn_t *tsdn, edata_cache_t *edata_cache,
-    ehooks_t *ehooks, rtree_ctx_t *rtree_ctx, ecache_t *ecache, edata_t *edata,
-    bool *coalesced, bool growing_retained) {
-    return extent_try_coalesce_impl(tsdn, edata_cache, ehooks, rtree_ctx,
-        ecache, edata, coalesced, growing_retained, true);
+    ehooks_t *ehooks, ecache_t *ecache, edata_t *edata, bool *coalesced,
+    bool growing_retained) {
+    return extent_try_coalesce_impl(tsdn, edata_cache, ehooks, ecache,
+        edata, coalesced, growing_retained, true);
 }
 
 /*
@@ -1041,9 +998,6 @@ extent_try_coalesce_large(tsdn_t *tsdn, edata_cache_t *edata_cache,
 static void
 extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
     edata_t *edata, bool growing_retained) {
-    rtree_ctx_t rtree_ctx_fallback;
-    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
     assert((ecache->state != extent_state_dirty &&
         ecache->state != extent_state_muzzy) ||
         !edata_zeroed_get(edata));
@@ -1052,16 +1006,15 @@ extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
 
     edata_szind_set(edata, SC_NSIZES);
     if (edata_slab_get(edata)) {
-        emap_deregister_interior(tsdn, &emap_global, rtree_ctx, edata);
+        emap_deregister_interior(tsdn, &emap_global, edata);
        edata_slab_set(edata, false);
     }
 
-    assert(rtree_edata_read(tsdn, &emap_global.rtree, rtree_ctx,
-        (uintptr_t)edata_base_get(edata), true) == edata);
+    emap_assert_mapped(tsdn, &emap_global, edata);
 
     if (!ecache->delay_coalesce) {
         edata = extent_try_coalesce(tsdn, &arena->edata_cache, ehooks,
-            rtree_ctx, ecache, edata, NULL, growing_retained);
+            ecache, edata, NULL, growing_retained);
     } else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) {
         assert(ecache == &arena->ecache_dirty);
         /* Always coalesce large extents eagerly. */
@@ -1069,8 +1022,8 @@ extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
         do {
             assert(edata_state_get(edata) == extent_state_active);
             edata = extent_try_coalesce_large(tsdn,
-                &arena->edata_cache, ehooks, rtree_ctx, ecache,
-                edata, &coalesced, growing_retained);
+                &arena->edata_cache, ehooks, ecache, edata,
+                &coalesced, growing_retained);
         } while (coalesced);
         if (edata_size_get(edata) >= oversize_threshold &&
             arena_may_force_decay(arena)) {
@@ -1276,11 +1229,9 @@ extent_split_impl(tsdn_t *tsdn, edata_cache_t *edata_cache, ehooks_t *ehooks,
         goto label_error_a;
     }
 
-    rtree_ctx_t rtree_ctx_fallback;
-    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
     emap_prepare_t prepare;
-    bool err = emap_split_prepare(tsdn, &emap_global, rtree_ctx, &prepare,
-        edata, size_a, szind_a, slab_a, trail, size_b, szind_b, slab_b);
+    bool err = emap_split_prepare(tsdn, &emap_global, &prepare, edata,
+        size_a, szind_a, slab_a, trail, size_b, szind_b, slab_b);
     if (err) {
         goto label_error_b;
     }
@@ -1339,10 +1290,8 @@ extent_merge_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_cache_t *edata_cache,
     * owned, so the following code uses decomposed helper functions rather
     * than extent_{,de}register() to do things in the right order.
     */
-    rtree_ctx_t rtree_ctx_fallback;
-    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
    emap_prepare_t prepare;
-    emap_merge_prepare(tsdn, &emap_global, rtree_ctx, &prepare, a, b);
+    emap_merge_prepare(tsdn, &emap_global, &prepare, a, b);
 
     emap_lock_edata2(tsdn, &emap_global, a, b);
     emap_merge_commit(tsdn, &emap_global, &prepare, a, b);