Refactor rtree to always use base_alloc() for node allocation.

Jason Evans, 2016-04-16 00:36:11 -07:00
parent db72272bef, commit 8c9be3e837

15 changed files with 315 additions and 217 deletions
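
In brief: rtree_new() loses its caller-supplied allocation hooks, node allocation is hard-wired to base_alloc(), and a tsdn_t * is threaded through iealloc(), chunk_deregister(), and the rtree API so that callers supply the allocation context instead of the rtree fetching one itself. A condensed before/after of the interface, assembled from the hunks below (the header declarations themselves are not part of this view, so take the prototypes as a sketch):

	/* Before: each rtree carried alloc/dalloc hooks supplied at creation. */
	bool	rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
	    rtree_node_dalloc_t *dalloc);

	/* After: no hooks; interior nodes always come from base_alloc(). */
	bool	rtree_new(rtree_t *rtree, unsigned bits);

	static rtree_elm_t *
	rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms)
	{
		return ((rtree_elm_t *)base_alloc(tsdn, nelms * sizeof(rtree_elm_t)));
	}

Bootstrap paths that run before full thread state is available (a0dalloc() and bootstrap_free() in src/jemalloc.c) pass a NULL tsdn, as the hunks below show.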

src/arena.c

@@ -264,12 +264,13 @@ arena_run_reg_alloc(arena_run_t *run, const arena_bin_info_t *bin_info)
 }
 
 JEMALLOC_INLINE_C void
-arena_run_reg_dalloc(arena_run_t *run, extent_t *extent, void *ptr)
+arena_run_reg_dalloc(tsdn_t *tsdn, arena_run_t *run, extent_t *extent,
+    void *ptr)
 {
 	arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent);
 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
 	size_t mapbits = arena_mapbits_get(chunk, pageind);
-	szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
+	szind_t binind = arena_ptr_small_binind_get(tsdn, ptr, mapbits);
 	const arena_bin_info_t *bin_info = &arena_bin_info[binind];
 	size_t regind = arena_run_regind(run, bin_info, ptr);
@@ -665,7 +666,7 @@ arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
 	bool committed;
 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 
-	chunk_deregister(chunk, &chunk->extent);
+	chunk_deregister(tsdn, chunk, &chunk->extent);
 
 	committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
 	if (!committed) {
@@ -1037,11 +1038,13 @@ arena_run_first_best_fit(arena_t *arena, size_t size)
 }
 
 static arena_run_t *
-arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
+arena_run_alloc_large_helper(tsdn_t *tsdn, arena_t *arena, size_t size,
+    bool zero)
 {
 	arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
 	if (run != NULL) {
-		if (arena_run_split_large(arena, iealloc(run), run, size, zero))
+		if (arena_run_split_large(arena, iealloc(tsdn, run), run, size,
+		    zero))
 			run = NULL;
 	}
 	return (run);
@@ -1057,7 +1060,7 @@ arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
 	assert(size == PAGE_CEILING(size));
 
 	/* Search the arena's chunks for the lowest best fit. */
-	run = arena_run_alloc_large_helper(arena, size, zero);
+	run = arena_run_alloc_large_helper(tsdn, arena, size, zero);
 	if (run != NULL)
 		return (run);
@@ -1067,7 +1070,8 @@ arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
 	chunk = arena_chunk_alloc(tsdn, arena);
 	if (chunk != NULL) {
 		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
-		if (arena_run_split_large(arena, iealloc(run), run, size, zero))
+		if (arena_run_split_large(arena, iealloc(tsdn, run), run, size,
+		    zero))
 			run = NULL;
 		return (run);
 	}
@@ -1077,15 +1081,16 @@ arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
 	 * sufficient memory available while this one dropped arena->lock in
 	 * arena_chunk_alloc(), so search one more time.
	 */
-	return (arena_run_alloc_large_helper(arena, size, zero));
+	return (arena_run_alloc_large_helper(tsdn, arena, size, zero));
 }
 
 static arena_run_t *
-arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
+arena_run_alloc_small_helper(tsdn_t *tsdn, arena_t *arena, size_t size,
+    szind_t binind)
 {
 	arena_run_t *run = arena_run_first_best_fit(arena, size);
 	if (run != NULL) {
-		if (arena_run_split_small(arena, iealloc(run), run, size,
+		if (arena_run_split_small(arena, iealloc(tsdn, run), run, size,
 		    binind))
 			run = NULL;
 	}
@@ -1103,7 +1108,7 @@ arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
 	assert(binind != BININD_INVALID);
 
 	/* Search the arena's chunks for the lowest best fit. */
-	run = arena_run_alloc_small_helper(arena, size, binind);
+	run = arena_run_alloc_small_helper(tsdn, arena, size, binind);
 	if (run != NULL)
 		return (run);
@@ -1113,7 +1118,7 @@ arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
 	chunk = arena_chunk_alloc(tsdn, arena);
 	if (chunk != NULL) {
 		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
-		if (arena_run_split_small(arena, iealloc(run), run, size,
+		if (arena_run_split_small(arena, iealloc(tsdn, run), run, size,
 		    binind))
 			run = NULL;
 		return (run);
@@ -1124,7 +1129,7 @@ arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
 	 * sufficient memory available while this one dropped arena->lock in
 	 * arena_chunk_alloc(), so search one more time.
	 */
-	return (arena_run_alloc_small_helper(arena, size, binind));
+	return (arena_run_alloc_small_helper(tsdn, arena, size, binind));
 }
 
 static bool
@@ -1426,7 +1431,7 @@ arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
 }
 
 static size_t
-arena_dirty_count(arena_t *arena)
+arena_dirty_count(tsdn_t *tsdn, arena_t *arena)
 {
 	size_t ndirty = 0;
 	arena_runs_dirty_link_t *rdelm;
@@ -1441,7 +1446,7 @@ arena_dirty_count(arena_t *arena)
 			npages = extent_size_get(chunkselm) >> LG_PAGE;
 			chunkselm = qr_next(chunkselm, cc_link);
 		} else {
-			extent_t *extent = iealloc(rdelm);
+			extent_t *extent = iealloc(tsdn, rdelm);
 			arena_chunk_t *chunk =
 			    (arena_chunk_t *)extent_addr_get(extent);
 			arena_chunk_map_misc_t *miscelm =
@@ -1504,7 +1509,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 			    LG_PAGE));
 			chunkselm = chunkselm_next;
 		} else {
-			extent_t *extent = iealloc(rdelm);
+			extent_t *extent = iealloc(tsdn, rdelm);
 			arena_chunk_t *chunk =
 			    (arena_chunk_t *)extent_addr_get(extent);
 			arena_chunk_map_misc_t *miscelm =
@@ -1586,7 +1591,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 		} else {
 			size_t pageind, run_size, flag_unzeroed, flags, i;
 			bool decommitted;
-			extent_t *extent = iealloc(rdelm);
+			extent_t *extent = iealloc(tsdn, rdelm);
 			arena_chunk_t *chunk =
 			    (arena_chunk_t *)extent_addr_get(extent);
 			arena_chunk_map_misc_t *miscelm =
@@ -1671,7 +1676,7 @@ arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
 			    size, zeroed, committed);
 		} else {
-			extent_t *extent = iealloc(rdelm);
+			extent_t *extent = iealloc(tsdn, rdelm);
 			arena_chunk_t *chunk =
 			    (arena_chunk_t *)extent_addr_get(extent);
 			arena_chunk_map_misc_t *miscelm =
@@ -1711,7 +1716,7 @@ arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
	 * because overhead grows nonlinearly as memory usage increases.
	 */
 	if (false && config_debug) {
-		size_t ndirty = arena_dirty_count(arena);
+		size_t ndirty = arena_dirty_count(tsdn, arena);
 		assert(ndirty == arena->ndirty);
 	}
 	assert(opt_purge != purge_mode_ratio || (arena->nactive >>
@@ -2276,7 +2281,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
			 * arena_bin_lower_run() must be called, as if a region
			 * were just deallocated from the run.
			 */
-			extent = iealloc(run);
+			extent = iealloc(tsdn, run);
 			chunk = (arena_chunk_t *)extent_addr_get(extent);
 			if (run->nfree == bin_info->nregs) {
 				arena_dalloc_bin_run(tsdn, arena, chunk, extent,
@@ -2537,7 +2542,7 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 		malloc_mutex_unlock(tsdn, &arena->lock);
 		return (NULL);
 	}
-	extent = iealloc(run);
+	extent = iealloc(tsdn, run);
 	chunk = (arena_chunk_t *)extent_addr_get(extent);
 	miscelm = arena_run_to_miscelm(run);
 	rpages = arena_miscelm_to_rpages(miscelm);
@@ -2555,7 +2560,7 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 		    arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
 		    LG_PAGE));
 		run = &miscelm->run;
-		extent = iealloc(run);
+		extent = iealloc(tsdn, run);
 
 		arena_run_trim_head(tsdn, arena, chunk, head_extent, head_run,
 		    alloc_size, alloc_size - leadsize);
@@ -2745,7 +2750,7 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
 	if (!junked && config_fill && unlikely(opt_junk_free))
 		arena_dalloc_junk_small(ptr, bin_info);
 
-	arena_run_reg_dalloc(run, extent, ptr);
+	arena_run_reg_dalloc(tsdn, run, extent, ptr);
 	if (run->nfree == bin_info->nregs) {
 		arena_dissociate_bin_run(extent, run, bin);
 		arena_dalloc_bin_run(tsdn, arena, chunk, extent, run, bin);
@@ -2793,8 +2798,8 @@ arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
 	if (config_debug) {
 		/* arena_ptr_small_binind_get() does extra sanity checking. */
-		assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
-		    pageind)) != BININD_INVALID);
+		assert(arena_ptr_small_binind_get(tsdn, ptr,
+		    arena_mapbits_get(chunk, pageind)) != BININD_INVALID);
 	}
 	bitselm = arena_bitselm_get_mutable(chunk, pageind);
 	arena_dalloc_bin(tsdn, arena, chunk, extent, ptr, pageind, bitselm);
@@ -2939,8 +2944,8 @@ arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
 			goto label_fail;
 		run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
-		if (arena_run_split_large(arena, iealloc(run), run, splitsize,
-		    zero))
+		if (arena_run_split_large(arena, iealloc(tsdn, run), run,
+		    splitsize, zero))
 			goto label_fail;
 
 		if (config_cache_oblivious && zero) {
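
Nearly all of the src/arena.c churn above is one mechanical change: iealloc() now takes the thread-state handle, so every helper that performs an extent lookup has to pass it through. The implied prototype change (the declaration lives in the private headers, which this view does not include, so this is a sketch):

	/* Old shape: extent-from-pointer lookup took no thread state. */
	extent_t	*iealloc(const void *ptr);

	/* New shape: the tsdn reaches chunks_rtree, whose interior nodes can
	 * now be allocated on demand with base_alloc(tsdn, ...). */
	extent_t	*iealloc(tsdn_t *tsdn, const void *ptr);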

src/chunk.c

@@ -146,8 +146,9 @@ chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
 
 	assert(extent_addr_get(extent) == chunk);
 
-	if (rtree_write(&chunks_rtree, (uintptr_t)chunk, extent))
+	if (rtree_write(tsdn, &chunks_rtree, (uintptr_t)chunk, extent))
 		return (true);
 	if (config_prof && opt_prof) {
 		size_t size = extent_size_get(extent);
 		size_t nadd = (size == 0) ? 1 : size / chunksize;
@@ -168,10 +169,10 @@ chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
 }
 
 void
-chunk_deregister(const void *chunk, const extent_t *extent)
+chunk_deregister(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
 {
 
-	rtree_clear(&chunks_rtree, (uintptr_t)chunk);
+	rtree_clear(tsdn, &chunks_rtree, (uintptr_t)chunk);
 	if (config_prof && opt_prof) {
 		size_t size = extent_size_get(extent);
 		size_t nsub = (size == 0) ? 1 : size / chunksize;
@@ -691,14 +692,6 @@ chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
 	return (false);
 }
 
-static rtree_elm_t *
-chunks_rtree_node_alloc(size_t nelms)
-{
-
-	return ((rtree_elm_t *)base_alloc(tsdn_fetch(), nelms *
-	    sizeof(rtree_elm_t)));
-}
-
 bool
 chunk_boot(void)
 {
@@ -735,7 +728,7 @@ chunk_boot(void)
 	if (have_dss && chunk_dss_boot())
 		return (true);
 	if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
-	    opt_lg_chunk), chunks_rtree_node_alloc, NULL))
+	    opt_lg_chunk)))
 		return (true);
 
 	return (false);
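
These hunks are the heart of the commit: chunks_rtree_node_alloc() existed only to route rtree node allocation to base_alloc(), and because the old rtree API carried no tsdn, the shim had to call tsdn_fetch() itself on every node allocation. With base_alloc() hard-wired into the rtree and the caller's tsdn passed down, both the shim and the hook arguments disappear. A call-site comparison, with the size expression abbreviated as bits for readability:

	/* Before: per-rtree hooks; the dalloc hook was never even supplied. */
	if (rtree_new(&chunks_rtree, bits, chunks_rtree_node_alloc, NULL))
		return (true);

	/* After: node allocation is the rtree's own business. */
	if (rtree_new(&chunks_rtree, bits))
		return (true);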

src/ckh.c

@@ -283,12 +283,14 @@ ckh_grow(tsdn_t *tsdn, ckh_t *ckh)
 		ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
 		if (!ckh_rebuild(ckh, tab)) {
-			idalloctm(tsdn, iealloc(tab), tab, NULL, true, true);
+			idalloctm(tsdn, iealloc(tsdn, tab), tab, NULL, true,
+			    true);
 			break;
 		}
 
 		/* Rebuilding failed, so back out partially rebuilt table. */
-		idalloctm(tsdn, iealloc(ckh->tab), ckh->tab, NULL, true, true);
+		idalloctm(tsdn, iealloc(tsdn, ckh->tab), ckh->tab, NULL, true,
+		    true);
 		ckh->tab = tab;
 		ckh->lg_curbuckets = lg_prevbuckets;
 	}
@@ -330,7 +332,7 @@ ckh_shrink(tsdn_t *tsdn, ckh_t *ckh)
 	ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
 	if (!ckh_rebuild(ckh, tab)) {
-		idalloctm(tsdn, iealloc(tab), tab, NULL, true, true);
+		idalloctm(tsdn, iealloc(tsdn, tab), tab, NULL, true, true);
 #ifdef CKH_COUNT
 		ckh->nshrinks++;
 #endif
@@ -338,7 +340,7 @@ ckh_shrink(tsdn_t *tsdn, ckh_t *ckh)
 	}
 
 	/* Rebuilding failed, so back out partially rebuilt table. */
-	idalloctm(tsdn, iealloc(ckh->tab), ckh->tab, NULL, true, true);
+	idalloctm(tsdn, iealloc(tsdn, ckh->tab), ckh->tab, NULL, true, true);
 	ckh->tab = tab;
 	ckh->lg_curbuckets = lg_prevbuckets;
 #ifdef CKH_COUNT
@@ -421,7 +423,7 @@ ckh_delete(tsdn_t *tsdn, ckh_t *ckh)
 	    (unsigned long long)ckh->nrelocs);
 #endif
 
-	idalloctm(tsdn, iealloc(ckh->tab), ckh->tab, NULL, true, true);
+	idalloctm(tsdn, iealloc(tsdn, ckh->tab), ckh->tab, NULL, true, true);
 	if (config_debug)
 		memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
 }

src/huge.c

@@ -45,7 +45,8 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 	arena = arena_choose(tsdn_tsd(tsdn), arena);
 	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
 	    arena, usize, alignment, &is_zeroed)) == NULL) {
-		idalloctm(tsdn, iealloc(extent), extent, NULL, true, true);
+		idalloctm(tsdn, iealloc(tsdn, extent), extent, NULL, true,
+		    true);
 		return (NULL);
 	}
@@ -53,7 +54,8 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 
 	if (chunk_register(tsdn, ret, extent)) {
 		arena_chunk_dalloc_huge(tsdn, arena, ret, usize);
-		idalloctm(tsdn, iealloc(extent), extent, NULL, true, true);
+		idalloctm(tsdn, iealloc(tsdn, extent), extent, NULL, true,
+		    true);
 		return (NULL);
 	}
@@ -194,7 +196,7 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, void *ptr,
 		post_zeroed = pre_zeroed;
 
 	/* Update the size of the huge allocation. */
-	chunk_deregister(ptr, extent);
+	chunk_deregister(tsdn, ptr, extent);
 	malloc_mutex_lock(tsdn, &arena->huge_mtx);
 	extent_size_set(extent, usize);
 	/* Update zeroed. */
@@ -231,7 +233,7 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, void *ptr,
 		return (true);
 
 	/* Update the size of the huge allocation. */
-	chunk_deregister(ptr, extent);
+	chunk_deregister(tsdn, ptr, extent);
 	malloc_mutex_lock(tsdn, &arena->huge_mtx);
 	extent_size_set(extent, usize);
 	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
@@ -353,7 +355,7 @@ huge_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr)
 	arena_t *arena;
 
 	arena = extent_arena_get(extent);
-	chunk_deregister(ptr, extent);
+	chunk_deregister(tsdn, ptr, extent);
 	malloc_mutex_lock(tsdn, &arena->huge_mtx);
 	ql_remove(&arena->huge, extent, ql_link);
 	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
@@ -362,7 +364,7 @@ huge_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr)
 	    extent_size_get(extent));
 	arena_chunk_dalloc_huge(tsdn, extent_arena_get(extent),
 	    extent_addr_get(extent), extent_size_get(extent));
-	idalloctm(tsdn, iealloc(extent), extent, NULL, true, true);
+	idalloctm(tsdn, iealloc(tsdn, extent), extent, NULL, true, true);
 
 	arena_decay_tick(tsdn, arena);
 }
@@ -387,7 +389,7 @@ huge_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
 	prof_tctx_t *tctx;
 	arena_t *arena;
 
-	assert(extent == iealloc(ptr));
+	assert(extent == iealloc(tsdn, ptr));
 
 	arena = extent_arena_get(extent);
 	malloc_mutex_lock(tsdn, &arena->huge_mtx);
@@ -403,7 +405,7 @@ huge_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
 {
 	arena_t *arena;
 
-	assert(extent == iealloc(ptr));
+	assert(extent == iealloc(tsdn, ptr));
 
 	arena = extent_arena_get(extent);
 	malloc_mutex_lock(tsdn, &arena->huge_mtx);

src/jemalloc.c

@@ -325,7 +325,7 @@ void
 a0dalloc(void *ptr)
 {
 
-	a0idalloc(iealloc(ptr), ptr, true);
+	a0idalloc(iealloc(NULL, ptr), ptr, true);
 }
 
 /*
@@ -365,7 +365,7 @@ bootstrap_free(void *ptr)
 	if (unlikely(ptr == NULL))
 		return;
 
-	a0idalloc(iealloc(ptr), ptr, false);
+	a0idalloc(iealloc(NULL, ptr), ptr, false);
 }
 
 static void
@@ -1401,7 +1401,8 @@ ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero,
 		p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path);
 		if (p == NULL)
 			return (NULL);
-		arena_prof_promoted(tsd_tsdn(tsd), iealloc(p), p, usize);
+		arena_prof_promoted(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
+		    usize);
 	} else
 		p = ialloc(tsd, usize, ind, zero, slow_path);
@@ -1423,7 +1424,7 @@ ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path)
 		prof_alloc_rollback(tsd, tctx, true);
 		return (NULL);
 	}
-	prof_malloc(tsd_tsdn(tsd), iealloc(p), p, usize, tctx);
+	prof_malloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p, usize, tctx);
 
 	return (p);
 }
@@ -1482,7 +1483,8 @@ ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
 			set_errno(ENOMEM);
 		}
 		if (config_stats && likely(ret != NULL)) {
-			assert(usize == isalloc(tsdn, iealloc(ret), ret, config_prof));
+			assert(usize == isalloc(tsdn, iealloc(tsdn, ret), ret,
+			    config_prof));
 			*tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
 		}
 		witness_assert_lockless(tsdn);
@@ -1525,7 +1527,8 @@ imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
 		p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
 		if (p == NULL)
 			return (NULL);
-		arena_prof_promoted(tsd_tsdn(tsd), iealloc(p), p, usize);
+		arena_prof_promoted(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
+		    usize);
 	} else
 		p = ipalloc(tsd, usize, alignment, false);
@@ -1547,7 +1550,7 @@ imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
 		prof_alloc_rollback(tsd, tctx, true);
 		return (NULL);
 	}
-	prof_malloc(tsd_tsdn(tsd), iealloc(p), p, usize, tctx);
+	prof_malloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p, usize, tctx);
 
 	return (p);
 }
@@ -1604,8 +1607,8 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
 	ret = 0;
 label_return:
 	if (config_stats && likely(result != NULL)) {
-		assert(usize == isalloc(tsd_tsdn(tsd), iealloc(result), result,
-		    config_prof));
+		assert(usize == isalloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
+		    result), result, config_prof));
 		*tsd_thread_allocatedp_get(tsd) += usize;
 	}
 	UTRACE(0, size, result);
@@ -1696,7 +1699,8 @@ irealloc_prof_sample(tsd_t *tsd, extent_t *extent, void *old_ptr,
 		    false);
 		if (p == NULL)
 			return (NULL);
-		arena_prof_promoted(tsd_tsdn(tsd), iealloc(p), p, usize);
+		arena_prof_promoted(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
+		    usize);
 	} else
 		p = iralloc(tsd, extent, old_ptr, old_usize, usize, 0, false);
@@ -1724,7 +1728,7 @@ irealloc_prof(tsd_t *tsd, extent_t *extent, void *old_ptr, size_t old_usize,
 		prof_alloc_rollback(tsd, tctx, true);
 		return (NULL);
 	}
-	e = (p == old_ptr) ? extent : iealloc(p);
+	e = (p == old_ptr) ? extent : iealloc(tsd_tsdn(tsd), p);
 	prof_realloc(tsd, e, p, usize, tctx, prof_active, true,
 	    old_ptr, old_usize, old_tctx);
@@ -1742,7 +1746,7 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
 	assert(ptr != NULL);
 	assert(malloc_initialized() || IS_INITIALIZER);
 
-	extent = iealloc(ptr);
+	extent = iealloc(tsd_tsdn(tsd), ptr);
 	if (config_prof && opt_prof) {
 		usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof);
 		prof_free(tsd, extent, ptr, usize);
@@ -1810,9 +1814,8 @@ je_realloc(void *ptr, size_t size)
 		witness_assert_lockless(tsd_tsdn(tsd));
 
-		extent = iealloc(ptr);
+		extent = iealloc(tsd_tsdn(tsd), ptr);
 		old_usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof);
-
 		if (config_prof && opt_prof) {
 			usize = s2u(size);
 			ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
@@ -1845,7 +1848,8 @@ je_realloc(void *ptr, size_t size)
 	if (config_stats && likely(ret != NULL)) {
 		tsd_t *tsd;
 
-		assert(usize == isalloc(tsdn, iealloc(ret), ret, config_prof));
+		assert(usize == isalloc(tsdn, iealloc(tsdn, ret), ret,
+		    config_prof));
 		tsd = tsdn_tsd(tsdn);
 		*tsd_thread_allocatedp_get(tsd) += usize;
 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
@@ -1999,7 +2003,7 @@ imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
 		    tcache, arena, slow_path);
 		if (p == NULL)
 			return (NULL);
-		arena_prof_promoted(tsdn, iealloc(p), p, usize);
+		arena_prof_promoted(tsdn, iealloc(tsdn, p), p, usize);
 	} else
 		p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena,
 		    slow_path);
@@ -2033,7 +2037,7 @@ imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
 		prof_alloc_rollback(tsd, tctx, true);
 		return (NULL);
 	}
-	prof_malloc(tsd_tsdn(tsd), iealloc(p), p, *usize, tctx);
+	prof_malloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p, *usize, tctx);
 
 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
 	return (p);
@@ -2134,7 +2138,7 @@ irallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *old_ptr,
 		    alignment, zero, tcache, arena);
 		if (p == NULL)
 			return (NULL);
-		arena_prof_promoted(tsdn, iealloc(p), p, usize);
+		arena_prof_promoted(tsdn, iealloc(tsdn, p), p, usize);
 	} else {
 		p = iralloct(tsdn, extent, old_ptr, old_usize, usize, alignment,
 		    zero, tcache, arena);
@@ -2180,7 +2184,7 @@ irallocx_prof(tsd_t *tsd, extent_t *extent, void *old_ptr, size_t old_usize,
 		e = extent;
 		*usize = isalloc(tsd_tsdn(tsd), e, p, config_prof);
 	} else
-		e = iealloc(p);
+		e = iealloc(tsd_tsdn(tsd), p);
 	prof_realloc(tsd, e, p, *usize, tctx, prof_active, true, old_ptr,
 	    old_usize, old_tctx);
@@ -2207,7 +2211,7 @@ je_rallocx(void *ptr, size_t size, int flags)
 	assert(malloc_initialized() || IS_INITIALIZER);
 	tsd = tsd_fetch();
 	witness_assert_lockless(tsd_tsdn(tsd));
-	extent = iealloc(ptr);
+	extent = iealloc(tsd_tsdn(tsd), ptr);
 
 	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
@@ -2241,8 +2245,8 @@ je_rallocx(void *ptr, size_t size, int flags)
 		if (unlikely(p == NULL))
 			goto label_oom;
 		if (config_stats) {
-			usize = isalloc(tsd_tsdn(tsd), iealloc(p), p,
-			    config_prof);
+			usize = isalloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
+			    p), p, config_prof);
 		}
 	}
 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
@@ -2357,7 +2361,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
 	assert(malloc_initialized() || IS_INITIALIZER);
 	tsd = tsd_fetch();
 	witness_assert_lockless(tsd_tsdn(tsd));
-	extent = iealloc(ptr);
+	extent = iealloc(tsd_tsdn(tsd), ptr);
 
 	old_usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof);
@@ -2412,7 +2416,7 @@ je_sallocx(const void *ptr, int flags)
 	if (config_ivsalloc)
 		usize = ivsalloc(tsdn, ptr, config_prof);
 	else
-		usize = isalloc(tsdn, iealloc(ptr), ptr, config_prof);
+		usize = isalloc(tsdn, iealloc(tsdn, ptr), ptr, config_prof);
 
 	witness_assert_lockless(tsdn);
 	return (usize);
@@ -2471,7 +2475,7 @@ je_sdallocx(void *ptr, size_t size, int flags)
 	assert(ptr != NULL);
 	assert(malloc_initialized() || IS_INITIALIZER);
 	tsd = tsd_fetch();
-	extent = iealloc(ptr);
+	extent = iealloc(tsd_tsdn(tsd), ptr);
 	usize = inallocx(tsd_tsdn(tsd), size, flags);
 	assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr, config_prof));
@@ -2591,7 +2595,7 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
 	if (config_ivsalloc)
 		ret = ivsalloc(tsdn, ptr, config_prof);
 	else {
-		ret = (ptr == NULL) ? 0 : isalloc(tsdn, iealloc(ptr), ptr,
+		ret = (ptr == NULL) ? 0 : isalloc(tsdn, iealloc(tsdn, ptr), ptr,
 		    config_prof);
 	}
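
A reading aid for the tsd_tsdn() conversions that dominate this file: tsd_t * is the thread's state, while tsdn_t * is its nullable counterpart, used by functions that may also run before thread state exists (hence iealloc(NULL, ptr) in the a0 bootstrap paths above). A minimal sketch, assuming the usual jemalloc definitions, which are not part of this diff:

	tsd_t *tsd = tsd_fetch();		/* Thread state; never NULL here. */
	tsdn_t *tsdn = tsd_tsdn(tsd);		/* Same state, as a nullable handle. */
	extent_t *extent = iealloc(tsdn, ptr);	/* Extent lookup with explicit context. */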

src/prof.c

@@ -596,7 +596,8 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
 		prof_leave(tsd, tdata_self);
 		/* Destroy gctx. */
 		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
-		idalloctm(tsd_tsdn(tsd), iealloc(gctx), gctx, NULL, true, true);
+		idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), gctx), gctx,
+		    NULL, true, true);
 	} else {
		/*
		 * Compensate for increment in prof_tctx_destroy() or
@@ -707,7 +708,8 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
 		prof_tdata_destroy(tsd_tsdn(tsd), tdata, false);
 
 	if (destroy_tctx)
-		idalloctm(tsd_tsdn(tsd), iealloc(tctx), tctx, NULL, true, true);
+		idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), tctx), tctx,
+		    NULL, true, true);
 }
 
 static bool
@@ -736,8 +738,8 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
 		if (ckh_insert(tsd_tsdn(tsd), &bt2gctx, btkey.v, gctx.v)) {
 			/* OOM. */
 			prof_leave(tsd, tdata);
-			idalloctm(tsd_tsdn(tsd), iealloc(gctx.v), gctx.v, NULL,
-			    true, true);
+			idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), gctx.v),
+			    gctx.v, NULL, true, true);
 			return (true);
 		}
 		new_gctx = true;
@@ -817,8 +819,8 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
 		if (error) {
 			if (new_gctx)
 				prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
-			idalloctm(tsd_tsdn(tsd), iealloc(ret.v), ret.v, NULL,
-			    true, true);
+			idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), ret.v),
+			    ret.v, NULL, true, true);
 			return (NULL);
 		}
 		malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
@@ -1241,8 +1243,8 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
 					tctx_tree_remove(&gctx->tctxs,
 					    to_destroy);
 					idalloctm(tsd_tsdn(tsd),
-					    iealloc(to_destroy), to_destroy,
-					    NULL, true, true);
+					    iealloc(tsd_tsdn(tsd), to_destroy),
+					    to_destroy, NULL, true, true);
 				} else
 					next = NULL;
 			} while (next != NULL);
@@ -1818,7 +1820,7 @@ prof_tdata_init_impl(tsdn_t *tsdn, uint64_t thr_uid, uint64_t thr_discrim,
 
 	if (ckh_new(tsdn, &tdata->bt2tctx, PROF_CKH_MINITEMS,
 	    prof_bt_hash, prof_bt_keycomp)) {
-		idalloctm(tsdn, iealloc(tdata), tdata, NULL, true, true);
+		idalloctm(tsdn, iealloc(tsdn, tdata), tdata, NULL, true, true);
 		return (NULL);
 	}
@@ -1882,11 +1884,11 @@ prof_tdata_destroy_locked(tsdn_t *tsdn, prof_tdata_t *tdata,
 	assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
 
 	if (tdata->thread_name != NULL) {
-		idalloctm(tsdn, iealloc(tdata->thread_name), tdata->thread_name,
-		    NULL, true, true);
+		idalloctm(tsdn, iealloc(tsdn, tdata->thread_name),
+		    tdata->thread_name, NULL, true, true);
 	}
 	ckh_delete(tsdn, &tdata->bt2tctx);
-	idalloctm(tsdn, iealloc(tdata), tdata, NULL, true, true);
+	idalloctm(tsdn, iealloc(tsdn, tdata), tdata, NULL, true, true);
 }
 
 static void
@@ -2080,8 +2082,8 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name)
 		return (EAGAIN);
 
 	if (tdata->thread_name != NULL) {
-		idalloctm(tsd_tsdn(tsd), iealloc(tdata->thread_name),
-		    tdata->thread_name, NULL, true, true);
+		idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd),
+		    tdata->thread_name), tdata->thread_name, NULL, true, true);
 		tdata->thread_name = NULL;
 	}
 	if (strlen(s) > 0)

src/rtree.c

@@ -13,8 +13,7 @@ hmin(unsigned ha, unsigned hb)
  * used.
  */
 bool
-rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
-    rtree_node_dalloc_t *dalloc)
+rtree_new(rtree_t *rtree, unsigned bits)
 {
 	unsigned bits_in_leaf, height, i;
@@ -32,8 +31,6 @@ rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
 		height = 1;
 	assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits);
 
-	rtree->alloc = alloc;
-	rtree->dalloc = dalloc;
 	rtree->height = height;
 
 	/* Root level. */
@@ -64,8 +61,43 @@ rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
 	return (false);
 }
 
+#ifdef JEMALLOC_JET
+#undef rtree_node_alloc
+#define	rtree_node_alloc JEMALLOC_N(rtree_node_alloc_impl)
+#endif
+static rtree_elm_t *
+rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms)
+{
+
+	return ((rtree_elm_t *)base_alloc(tsdn, nelms * sizeof(rtree_elm_t)));
+}
+#ifdef JEMALLOC_JET
+#undef rtree_node_alloc
+#define	rtree_node_alloc JEMALLOC_N(rtree_node_alloc)
+rtree_node_alloc_t *rtree_node_alloc = JEMALLOC_N(rtree_node_alloc_impl);
+#endif
+
+#ifdef JEMALLOC_JET
+#undef rtree_node_dalloc
+#define	rtree_node_dalloc JEMALLOC_N(rtree_node_dalloc_impl)
+#endif
+UNUSED static void
+rtree_node_dalloc(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node)
+{
+
+	/* Nodes are never deleted during normal operation. */
+	not_reached();
+}
+#ifdef JEMALLOC_JET
+#undef rtree_node_dalloc
+#define	rtree_node_dalloc JEMALLOC_N(rtree_node_dalloc)
+rtree_node_dalloc_t *rtree_node_dalloc = JEMALLOC_N(rtree_node_dalloc_impl);
+#endif
+
 #ifdef JEMALLOC_JET
 static void
-rtree_delete_subtree(rtree_t *rtree, rtree_elm_t *node, unsigned level)
+rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node,
+    unsigned level)
 {
 
 	if (level + 1 < rtree->height) {
@@ -74,27 +106,31 @@ rtree_delete_subtree(rtree_t *rtree, rtree_elm_t *node, unsigned level)
 		nchildren = ZU(1) << rtree->levels[level].bits;
 		for (i = 0; i < nchildren; i++) {
 			rtree_elm_t *child = node[i].child;
-			if (child != NULL)
-				rtree_delete_subtree(rtree, child, level + 1);
+			if (child != NULL) {
+				rtree_delete_subtree(tsdn, rtree, child, level +
+				    1);
+			}
 		}
 	}
-	rtree->dalloc(node);
+	rtree_node_dalloc(tsdn, rtree, node);
 }
 
 void
-rtree_delete(rtree_t *rtree)
+rtree_delete(tsdn_t *tsdn, rtree_t *rtree)
 {
 	unsigned i;
 
 	for (i = 0; i < rtree->height; i++) {
 		rtree_elm_t *subtree = rtree->levels[i].subtree;
 
 		if (subtree != NULL)
-			rtree_delete_subtree(rtree, subtree, i);
+			rtree_delete_subtree(tsdn, rtree, subtree, i);
 	}
 }
 #endif
 
 static rtree_elm_t *
-rtree_node_init(rtree_t *rtree, unsigned level, rtree_elm_t **elmp)
+rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
+    rtree_elm_t **elmp)
 {
 	rtree_elm_t *node;
@@ -108,7 +144,8 @@ rtree_node_init(rtree_t *rtree, unsigned level, rtree_elm_t **elmp)
 			node = atomic_read_p((void **)elmp);
 		} while (node == RTREE_NODE_INITIALIZING);
 	} else {
-		node = rtree->alloc(ZU(1) << rtree->levels[level].bits);
+		node = rtree_node_alloc(tsdn, rtree, ZU(1) <<
+		    rtree->levels[level].bits);
 		if (node == NULL)
 			return (NULL);
 		atomic_write_p((void **)elmp, node);
@@ -118,15 +155,17 @@ rtree_node_init(rtree_t *rtree, unsigned level, rtree_elm_t **elmp)
 }
 
 rtree_elm_t *
-rtree_subtree_read_hard(rtree_t *rtree, unsigned level)
+rtree_subtree_read_hard(tsdn_t *tsdn, rtree_t *rtree, unsigned level)
 {
 
-	return (rtree_node_init(rtree, level, &rtree->levels[level].subtree));
+	return (rtree_node_init(tsdn, rtree, level,
+	    &rtree->levels[level].subtree));
 }
 
 rtree_elm_t *
-rtree_child_read_hard(rtree_t *rtree, rtree_elm_t *elm, unsigned level)
+rtree_child_read_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *elm,
+    unsigned level)
 {
 
-	return (rtree_node_init(rtree, level, &elm->child));
+	return (rtree_node_init(tsdn, rtree, level, &elm->child));
 }
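
The #ifdef JEMALLOC_JET blocks above are jemalloc's standard testing-hook pattern: in JET builds the real function is compiled under an _impl name and exported through a writable function pointer, so unit tests can interpose on node allocation (production code never frees rtree nodes, hence the not_reached() in rtree_node_dalloc()). A hypothetical test-side interposition, illustrative only and not part of this commit:

	#ifdef JEMALLOC_JET
	static rtree_node_alloc_t *node_alloc_orig;	/* Saved real allocator. */

	static rtree_elm_t *
	node_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms)
	{
		/* A test could count calls or inject failures here. */
		return (node_alloc_orig(tsdn, rtree, nelms));
	}

	static void
	install_intercept(void)
	{
		node_alloc_orig = rtree_node_alloc;
		rtree_node_alloc = node_alloc_intercept;	/* Writable under JET. */
	}
	#endif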

src/tcache.c

@@ -27,7 +27,7 @@ size_t
 tcache_salloc(tsdn_t *tsdn, const void *ptr)
 {
 
-	return (arena_salloc(tsdn, iealloc(ptr), ptr, false));
+	return (arena_salloc(tsdn, iealloc(tsdn, ptr), ptr, false));
 }
 
 void
@@ -101,7 +101,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 	assert(arena != NULL);
 	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
 		/* Lock the arena bin associated with the first object. */
-		extent_t *extent = iealloc(*(tbin->avail - 1));
+		extent_t *extent = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1));
 		arena_t *bin_arena = extent_arena_get(extent);
 		arena_bin_t *bin = &bin_arena->bins[binind];
@@ -125,7 +125,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 			ptr = *(tbin->avail - 1 - i);
 			assert(ptr != NULL);
-			extent = iealloc(ptr);
+			extent = iealloc(tsd_tsdn(tsd), ptr);
 			if (extent_arena_get(extent) == bin_arena) {
 				arena_chunk_t *chunk =
 				    (arena_chunk_t *)extent_addr_get(extent);
@@ -185,7 +185,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 	assert(arena != NULL);
 	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
 		/* Lock the arena associated with the first object. */
-		extent_t *extent = iealloc(*(tbin->avail - 1));
+		extent_t *extent = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1));
 		arena_t *locked_arena = extent_arena_get(extent);
 		UNUSED bool idump;
@@ -211,7 +211,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 		for (i = 0; i < nflush; i++) {
 			ptr = *(tbin->avail - 1 - i);
 			assert(ptr != NULL);
-			extent = iealloc(ptr);
+			extent = iealloc(tsd_tsdn(tsd), ptr);
 			if (extent_arena_get(extent) == locked_arena) {
 				arena_chunk_t *chunk =
 				    (arena_chunk_t *)extent_addr_get(extent);
@@ -394,7 +394,8 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
 	    arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes))
 		prof_idump(tsd_tsdn(tsd));
 
-	idalloctm(tsd_tsdn(tsd), iealloc(tcache), tcache, NULL, true, true);
+	idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), tcache), tcache, NULL,
+	    true, true);
 }
 
 void