From 8c9be3e83732883e852d43bca2cf7724c465f93e Mon Sep 17 00:00:00 2001 From: Jason Evans Date: Sat, 16 Apr 2016 00:36:11 -0700 Subject: [PATCH] Refactor rtree to always use base_alloc() for node allocation. --- include/jemalloc/internal/arena.h | 18 +-- include/jemalloc/internal/chunk.h | 9 +- .../jemalloc/internal/jemalloc_internal.h.in | 31 +++-- include/jemalloc/internal/private_symbols.txt | 2 + include/jemalloc/internal/rtree.h | 75 ++++++----- include/jemalloc/internal/tcache.h | 4 +- src/arena.c | 59 +++++---- src/chunk.c | 17 +-- src/ckh.c | 12 +- src/huge.c | 18 +-- src/jemalloc.c | 56 +++++---- src/prof.c | 30 ++--- src/rtree.c | 71 ++++++++--- src/tcache.c | 13 +- test/unit/rtree.c | 117 ++++++++++++------ 15 files changed, 315 insertions(+), 217 deletions(-) diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h index d441aaf5..ff3e01d8 100644 --- a/include/jemalloc/internal/arena.h +++ b/include/jemalloc/internal/arena.h @@ -633,7 +633,8 @@ size_t arena_metadata_allocated_get(arena_t *arena); bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes); bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes); bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes); -szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits); +szind_t arena_ptr_small_binind_get(tsdn_t *tsdn, const void *ptr, + size_t mapbits); szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin); size_t arena_run_regind(arena_run_t *run, const arena_bin_info_t *bin_info, const void *ptr); @@ -647,7 +648,7 @@ void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks); void arena_decay_tick(tsdn_t *tsdn, arena_t *arena); void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, tcache_t *tcache, bool slow_path); -arena_t *arena_aalloc(const void *ptr); +arena_t *arena_aalloc(tsdn_t *tsdn, const void *ptr); size_t arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr, bool demote); void arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, @@ -1049,7 +1050,7 @@ arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) # ifdef JEMALLOC_ARENA_INLINE_B JEMALLOC_ALWAYS_INLINE szind_t -arena_ptr_small_binind_get(const void *ptr, size_t mapbits) +arena_ptr_small_binind_get(tsdn_t *tsdn, const void *ptr, size_t mapbits) { szind_t binind; @@ -1071,7 +1072,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits) assert(binind != BININD_INVALID); assert(binind < NBINS); - extent = iealloc(ptr); + extent = iealloc(tsdn, ptr); chunk = (arena_chunk_t *)extent_addr_get(extent); arena = extent_arena_get(extent); pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; @@ -1314,10 +1315,10 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, } JEMALLOC_ALWAYS_INLINE arena_t * -arena_aalloc(const void *ptr) +arena_aalloc(tsdn_t *tsdn, const void *ptr) { - return (extent_arena_get(iealloc(ptr))); + return (extent_arena_get(iealloc(tsdn, ptr))); } /* Return the size of the allocation pointed to by ptr. */ @@ -1361,7 +1362,7 @@ arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr, bool demote) * object). 
*/ assert(arena_mapbits_large_get(chunk, pageind) != 0 || - arena_ptr_small_binind_get(ptr, + arena_ptr_small_binind_get(tsdn, ptr, arena_mapbits_get(chunk, pageind)) == binind); ret = index2size(binind); } @@ -1389,7 +1390,8 @@ arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache, if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) { /* Small allocation. */ if (likely(tcache != NULL)) { - szind_t binind = arena_ptr_small_binind_get(ptr, + szind_t binind = + arena_ptr_small_binind_get(tsdn, ptr, mapbits); tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, binind, slow_path); diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h index c13f2171..be56c2bd 100644 --- a/include/jemalloc/internal/chunk.h +++ b/include/jemalloc/internal/chunk.h @@ -53,7 +53,8 @@ chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks); bool chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent); -void chunk_deregister(const void *chunk, const extent_t *extent); +void chunk_deregister(tsdn_t *tsdn, const void *chunk, + const extent_t *extent); void chunk_reregister(tsdn_t *tsdn, const void *chunk, const extent_t *extent); void *chunk_alloc_base(size_t size); @@ -81,15 +82,15 @@ void chunk_postfork_child(tsdn_t *tsdn); #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE -extent_t *chunk_lookup(const void *chunk, bool dependent); +extent_t *chunk_lookup(tsdn_t *tsdn, const void *chunk, bool dependent); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_)) JEMALLOC_INLINE extent_t * -chunk_lookup(const void *ptr, bool dependent) +chunk_lookup(tsdn_t *tsdn, const void *ptr, bool dependent) { - return (rtree_read(&chunks_rtree, (uintptr_t)ptr, dependent)); + return (rtree_read(tsdn, &chunks_rtree, (uintptr_t)ptr, dependent)); } #endif diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in index 1fc9d3d7..d1306e17 100644 --- a/include/jemalloc/internal/jemalloc_internal.h.in +++ b/include/jemalloc/internal/jemalloc_internal.h.in @@ -961,15 +961,15 @@ decay_ticker_get(tsd_t *tsd, unsigned ind) #undef JEMALLOC_ARENA_INLINE_A #ifndef JEMALLOC_ENABLE_INLINE -extent_t *iealloc(const void *ptr); +extent_t *iealloc(tsdn_t *tsdn, const void *ptr); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) JEMALLOC_ALWAYS_INLINE extent_t * -iealloc(const void *ptr) +iealloc(tsdn_t *tsdn, const void *ptr) { - return (chunk_lookup(ptr, true)); + return (chunk_lookup(tsdn, ptr, true)); } #endif @@ -980,8 +980,7 @@ iealloc(const void *ptr) #include "jemalloc/internal/hash.h" #ifndef JEMALLOC_ENABLE_INLINE -extent_t *iealloc(const void *ptr); -arena_t *iaalloc(const void *ptr); +arena_t *iaalloc(tsdn_t *tsdn, const void *ptr); size_t isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr, bool demote); void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, @@ -1012,19 +1011,19 @@ bool ixalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) JEMALLOC_ALWAYS_INLINE arena_t * -iaalloc(const void *ptr) +iaalloc(tsdn_t *tsdn, const void *ptr) { assert(ptr != NULL); - return (arena_aalloc(ptr)); + return (arena_aalloc(tsdn, ptr)); } /* * Typical usage: * tsdn_t *tsdn = [...] * void *ptr = [...] 
- * extent_t *extent = iealloc(ptr); + * extent_t *extent = iealloc(tsdn, ptr); * size_t sz = isalloc(tsdn, extent, ptr, config_prof); */ JEMALLOC_ALWAYS_INLINE size_t @@ -1050,8 +1049,8 @@ iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache, ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path); if (config_stats && is_metadata && likely(ret != NULL)) { - arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, - iealloc(ret), ret, config_prof)); + arena_metadata_allocated_add(iaalloc(tsdn, ret), isalloc(tsdn, + iealloc(tsdn, ret), ret, config_prof)); } return (ret); } @@ -1078,8 +1077,8 @@ ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache); assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); if (config_stats && is_metadata && likely(ret != NULL)) { - arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, - iealloc(ret), ret, config_prof)); + arena_metadata_allocated_add(iaalloc(tsdn, ret), isalloc(tsdn, + iealloc(tsdn, ret), ret, config_prof)); } return (ret); } @@ -1106,7 +1105,7 @@ ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote) extent_t *extent; /* Return 0 if ptr is not within a chunk managed by jemalloc. */ - extent = chunk_lookup(ptr, false); + extent = chunk_lookup(tsdn, ptr, false); if (extent == NULL) return (0); /* Only arena chunks should be looked up via interior pointers. */ @@ -1123,10 +1122,10 @@ idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache, assert(ptr != NULL); assert(!is_metadata || tcache == NULL); - assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto); + assert(!is_metadata || iaalloc(tsdn, ptr)->ind < narenas_auto); if (config_stats && is_metadata) { - arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, extent, - ptr, config_prof)); + arena_metadata_allocated_sub(iaalloc(tsdn, ptr), isalloc(tsdn, + extent, ptr, config_prof)); } arena_dalloc(tsdn, extent, ptr, tcache, slow_path); diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt index 5f4a4b0b..42c730c6 100644 --- a/include/jemalloc/internal/private_symbols.txt +++ b/include/jemalloc/internal/private_symbols.txt @@ -460,6 +460,8 @@ rtree_child_tryread rtree_clear rtree_delete rtree_new +rtree_node_alloc +rtree_node_dalloc rtree_node_valid rtree_elm_acquire rtree_elm_lookup diff --git a/include/jemalloc/internal/rtree.h b/include/jemalloc/internal/rtree.h index 59a7ab3c..dbea434c 100644 --- a/include/jemalloc/internal/rtree.h +++ b/include/jemalloc/internal/rtree.h @@ -23,13 +23,6 @@ typedef struct rtree_s rtree_t; /* Used for two-stage lock-free node initialization. */ #define RTREE_NODE_INITIALIZING ((rtree_elm_t *)0x1) -/* - * The node allocation callback function's argument is the number of contiguous - * rtree_elm_t structures to allocate, and the resulting memory must be zeroed. 
- */ -typedef rtree_elm_t *(rtree_node_alloc_t)(size_t); -typedef void (rtree_node_dalloc_t)(rtree_elm_t *); - #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS @@ -79,8 +72,6 @@ struct rtree_level_s { }; struct rtree_s { - rtree_node_alloc_t *alloc; - rtree_node_dalloc_t *dalloc; unsigned height; /* * Precomputed table used to convert from the number of leading 0 key @@ -94,12 +85,18 @@ struct rtree_s { /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS -bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, - rtree_node_dalloc_t *dalloc); -void rtree_delete(rtree_t *rtree); -rtree_elm_t *rtree_subtree_read_hard(rtree_t *rtree, unsigned level); -rtree_elm_t *rtree_child_read_hard(rtree_t *rtree, rtree_elm_t *elm, +bool rtree_new(rtree_t *rtree, unsigned bits); +#ifdef JEMALLOC_JET +typedef rtree_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t); +extern rtree_node_alloc_t *rtree_node_alloc; +typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_elm_t *); +extern rtree_node_dalloc_t *rtree_node_dalloc; +void rtree_delete(tsdn_t *tsdn, rtree_t *rtree); +#endif +rtree_elm_t *rtree_subtree_read_hard(tsdn_t *tsdn, rtree_t *rtree, unsigned level); +rtree_elm_t *rtree_child_read_hard(tsdn_t *tsdn, rtree_t *rtree, + rtree_elm_t *elm, unsigned level); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ @@ -111,25 +108,27 @@ uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level); bool rtree_node_valid(rtree_elm_t *node); rtree_elm_t *rtree_child_tryread(rtree_elm_t *elm, bool dependent); -rtree_elm_t *rtree_child_read(rtree_t *rtree, rtree_elm_t *elm, +rtree_elm_t *rtree_child_read(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *elm, unsigned level, bool dependent); extent_t *rtree_elm_read(rtree_elm_t *elm, bool dependent); void rtree_elm_write(rtree_elm_t *elm, const extent_t *extent); rtree_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent); -rtree_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level, - bool dependent); -rtree_elm_t *rtree_elm_lookup(rtree_t *rtree, uintptr_t key, +rtree_elm_t *rtree_subtree_read(tsdn_t *tsdn, rtree_t *rtree, + unsigned level, bool dependent); +rtree_elm_t *rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key, bool dependent, bool init_missing); -bool rtree_write(rtree_t *rtree, uintptr_t key, const extent_t *extent); -extent_t *rtree_read(rtree_t *rtree, uintptr_t key, bool dependent); -rtree_elm_t *rtree_elm_acquire(rtree_t *rtree, uintptr_t key, +bool rtree_write(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key, + const extent_t *extent); +extent_t *rtree_read(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key, + bool dependent); +rtree_elm_t *rtree_elm_acquire(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key, bool dependent, bool init_missing); extent_t *rtree_elm_read_acquired(rtree_elm_t *elm); void rtree_elm_write_acquired(rtree_elm_t *elm, const extent_t *extent); void rtree_elm_release(rtree_elm_t *elm); -void rtree_clear(rtree_t *rtree, uintptr_t key); +void rtree_clear(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_)) @@ -177,14 +176,14 @@ rtree_child_tryread(rtree_elm_t *elm, bool dependent) } JEMALLOC_ALWAYS_INLINE rtree_elm_t * -rtree_child_read(rtree_t *rtree, rtree_elm_t *elm, unsigned level, 
+rtree_child_read(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *elm, unsigned level, bool dependent) { rtree_elm_t *child; child = rtree_child_tryread(elm, dependent); if (!dependent && unlikely(!rtree_node_valid(child))) - child = rtree_child_read_hard(rtree, elm, level); + child = rtree_child_read_hard(tsdn, rtree, elm, level); assert(!dependent || child != NULL); return (child); } @@ -238,19 +237,19 @@ rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent) } JEMALLOC_ALWAYS_INLINE rtree_elm_t * -rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent) +rtree_subtree_read(tsdn_t *tsdn, rtree_t *rtree, unsigned level, bool dependent) { rtree_elm_t *subtree; subtree = rtree_subtree_tryread(rtree, level, dependent); if (!dependent && unlikely(!rtree_node_valid(subtree))) - subtree = rtree_subtree_read_hard(rtree, level); + subtree = rtree_subtree_read_hard(tsdn, rtree, level); assert(!dependent || subtree != NULL); return (subtree); } JEMALLOC_ALWAYS_INLINE rtree_elm_t * -rtree_elm_lookup(rtree_t *rtree, uintptr_t key, bool dependent, +rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key, bool dependent, bool init_missing) { uintptr_t subkey; @@ -261,8 +260,8 @@ rtree_elm_lookup(rtree_t *rtree, uintptr_t key, bool dependent, start_level = rtree_start_level(rtree, key); - node = init_missing ? rtree_subtree_read(rtree, start_level, dependent) - : rtree_subtree_tryread(rtree, start_level, dependent); + node = init_missing ? rtree_subtree_read(tsdn, rtree, start_level, + dependent) : rtree_subtree_tryread(rtree, start_level, dependent); #define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height) switch (start_level + RTREE_GET_BIAS) { #define RTREE_GET_SUBTREE(level) \ @@ -272,7 +271,7 @@ rtree_elm_lookup(rtree_t *rtree, uintptr_t key, bool dependent, return (NULL); \ subkey = rtree_subkey(rtree, key, level - \ RTREE_GET_BIAS); \ - node = init_missing ? rtree_child_read(rtree, \ + node = init_missing ? rtree_child_read(tsdn, rtree, \ &node[subkey], level - RTREE_GET_BIAS, dependent) : \ rtree_child_tryread(&node[subkey], dependent); \ /* Fall through. */ @@ -346,14 +345,14 @@ rtree_elm_lookup(rtree_t *rtree, uintptr_t key, bool dependent, } JEMALLOC_INLINE bool -rtree_write(rtree_t *rtree, uintptr_t key, const extent_t *extent) +rtree_write(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key, const extent_t *extent) { rtree_elm_t *elm; assert(extent != NULL); /* Use rtree_clear() for this case. 
*/ assert(((uintptr_t)extent & (uintptr_t)0x1) == (uintptr_t)0x0); - elm = rtree_elm_lookup(rtree, key, false, true); + elm = rtree_elm_lookup(tsdn, rtree, key, false, true); if (elm == NULL) return (true); assert(rtree_elm_read(elm, false) == NULL); @@ -363,11 +362,11 @@ rtree_write(rtree_t *rtree, uintptr_t key, const extent_t *extent) } JEMALLOC_ALWAYS_INLINE extent_t * -rtree_read(rtree_t *rtree, uintptr_t key, bool dependent) +rtree_read(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key, bool dependent) { rtree_elm_t *elm; - elm = rtree_elm_lookup(rtree, key, dependent, false); + elm = rtree_elm_lookup(tsdn, rtree, key, dependent, false); if (elm == NULL) return (NULL); @@ -375,12 +374,12 @@ rtree_read(rtree_t *rtree, uintptr_t key, bool dependent) } JEMALLOC_INLINE rtree_elm_t * -rtree_elm_acquire(rtree_t *rtree, uintptr_t key, bool dependent, +rtree_elm_acquire(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key, bool dependent, bool init_missing) { rtree_elm_t *elm; - elm = rtree_elm_lookup(rtree, key, dependent, init_missing); + elm = rtree_elm_lookup(tsdn, rtree, key, dependent, init_missing); if (!dependent && elm == NULL) return (NULL); { @@ -427,11 +426,11 @@ rtree_elm_release(rtree_elm_t *elm) } JEMALLOC_INLINE void -rtree_clear(rtree_t *rtree, uintptr_t key) +rtree_clear(tsdn_t *tsdn, rtree_t *rtree, uintptr_t key) { rtree_elm_t *elm; - elm = rtree_elm_acquire(rtree, key, true, false); + elm = rtree_elm_acquire(tsdn, rtree, key, true, false); rtree_elm_write_acquired(elm, NULL); rtree_elm_release(elm); } diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h index d6d27506..ee63a652 100644 --- a/include/jemalloc/internal/tcache.h +++ b/include/jemalloc/internal/tcache.h @@ -370,8 +370,8 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, } if (config_prof && usize == LARGE_MINCLASS) { - arena_chunk_t *chunk = - (arena_chunk_t *)extent_addr_get(iealloc(ret)); + arena_chunk_t *chunk =(arena_chunk_t *)extent_addr_get( + iealloc(tsd_tsdn(tsd), ret)); size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >> LG_PAGE); arena_mapbits_large_binind_set(chunk, pageind, diff --git a/src/arena.c b/src/arena.c index 3abbc623..8a93fca0 100644 --- a/src/arena.c +++ b/src/arena.c @@ -264,12 +264,13 @@ arena_run_reg_alloc(arena_run_t *run, const arena_bin_info_t *bin_info) } JEMALLOC_INLINE_C void -arena_run_reg_dalloc(arena_run_t *run, extent_t *extent, void *ptr) +arena_run_reg_dalloc(tsdn_t *tsdn, arena_run_t *run, extent_t *extent, + void *ptr) { arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent); size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; size_t mapbits = arena_mapbits_get(chunk, pageind); - szind_t binind = arena_ptr_small_binind_get(ptr, mapbits); + szind_t binind = arena_ptr_small_binind_get(tsdn, ptr, mapbits); const arena_bin_info_t *bin_info = &arena_bin_info[binind]; size_t regind = arena_run_regind(run, bin_info, ptr); @@ -665,7 +666,7 @@ arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk) bool committed; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - chunk_deregister(chunk, &chunk->extent); + chunk_deregister(tsdn, chunk, &chunk->extent); committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0); if (!committed) { @@ -1037,11 +1038,13 @@ arena_run_first_best_fit(arena_t *arena, size_t size) } static arena_run_t * -arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) +arena_run_alloc_large_helper(tsdn_t *tsdn, arena_t *arena, size_t size, + bool 
zero) { arena_run_t *run = arena_run_first_best_fit(arena, s2u(size)); if (run != NULL) { - if (arena_run_split_large(arena, iealloc(run), run, size, zero)) + if (arena_run_split_large(arena, iealloc(tsdn, run), run, size, + zero)) run = NULL; } return (run); @@ -1057,7 +1060,7 @@ arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero) assert(size == PAGE_CEILING(size)); /* Search the arena's chunks for the lowest best fit. */ - run = arena_run_alloc_large_helper(arena, size, zero); + run = arena_run_alloc_large_helper(tsdn, arena, size, zero); if (run != NULL) return (run); @@ -1067,7 +1070,8 @@ arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero) chunk = arena_chunk_alloc(tsdn, arena); if (chunk != NULL) { run = &arena_miscelm_get_mutable(chunk, map_bias)->run; - if (arena_run_split_large(arena, iealloc(run), run, size, zero)) + if (arena_run_split_large(arena, iealloc(tsdn, run), run, size, + zero)) run = NULL; return (run); } @@ -1077,15 +1081,16 @@ arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero) * sufficient memory available while this one dropped arena->lock in * arena_chunk_alloc(), so search one more time. */ - return (arena_run_alloc_large_helper(arena, size, zero)); + return (arena_run_alloc_large_helper(tsdn, arena, size, zero)); } static arena_run_t * -arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind) +arena_run_alloc_small_helper(tsdn_t *tsdn, arena_t *arena, size_t size, + szind_t binind) { arena_run_t *run = arena_run_first_best_fit(arena, size); if (run != NULL) { - if (arena_run_split_small(arena, iealloc(run), run, size, + if (arena_run_split_small(arena, iealloc(tsdn, run), run, size, binind)) run = NULL; } @@ -1103,7 +1108,7 @@ arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind) assert(binind != BININD_INVALID); /* Search the arena's chunks for the lowest best fit. */ - run = arena_run_alloc_small_helper(arena, size, binind); + run = arena_run_alloc_small_helper(tsdn, arena, size, binind); if (run != NULL) return (run); @@ -1113,7 +1118,7 @@ arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind) chunk = arena_chunk_alloc(tsdn, arena); if (chunk != NULL) { run = &arena_miscelm_get_mutable(chunk, map_bias)->run; - if (arena_run_split_small(arena, iealloc(run), run, size, + if (arena_run_split_small(arena, iealloc(tsdn, run), run, size, binind)) run = NULL; return (run); @@ -1124,7 +1129,7 @@ arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind) * sufficient memory available while this one dropped arena->lock in * arena_chunk_alloc(), so search one more time. 
*/ - return (arena_run_alloc_small_helper(arena, size, binind)); + return (arena_run_alloc_small_helper(tsdn, arena, size, binind)); } static bool @@ -1426,7 +1431,7 @@ arena_maybe_purge(tsdn_t *tsdn, arena_t *arena) } static size_t -arena_dirty_count(arena_t *arena) +arena_dirty_count(tsdn_t *tsdn, arena_t *arena) { size_t ndirty = 0; arena_runs_dirty_link_t *rdelm; @@ -1441,7 +1446,7 @@ arena_dirty_count(arena_t *arena) npages = extent_size_get(chunkselm) >> LG_PAGE; chunkselm = qr_next(chunkselm, cc_link); } else { - extent_t *extent = iealloc(rdelm); + extent_t *extent = iealloc(tsdn, rdelm); arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent); arena_chunk_map_misc_t *miscelm = @@ -1504,7 +1509,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, LG_PAGE)); chunkselm = chunkselm_next; } else { - extent_t *extent = iealloc(rdelm); + extent_t *extent = iealloc(tsdn, rdelm); arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent); arena_chunk_map_misc_t *miscelm = @@ -1586,7 +1591,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, } else { size_t pageind, run_size, flag_unzeroed, flags, i; bool decommitted; - extent_t *extent = iealloc(rdelm); + extent_t *extent = iealloc(tsdn, rdelm); arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent); arena_chunk_map_misc_t *miscelm = @@ -1671,7 +1676,7 @@ arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr, size, zeroed, committed); } else { - extent_t *extent = iealloc(rdelm); + extent_t *extent = iealloc(tsdn, rdelm); arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent); arena_chunk_map_misc_t *miscelm = @@ -1711,7 +1716,7 @@ arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit) * because overhead grows nonlinearly as memory usage increases. */ if (false && config_debug) { - size_t ndirty = arena_dirty_count(arena); + size_t ndirty = arena_dirty_count(tsdn, arena); assert(ndirty == arena->ndirty); } assert(opt_purge != purge_mode_ratio || (arena->nactive >> @@ -2276,7 +2281,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin) * arena_bin_lower_run() must be called, as if a region * were just deallocated from the run. 
*/ - extent = iealloc(run); + extent = iealloc(tsdn, run); chunk = (arena_chunk_t *)extent_addr_get(extent); if (run->nfree == bin_info->nregs) { arena_dalloc_bin_run(tsdn, arena, chunk, extent, @@ -2537,7 +2542,7 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, malloc_mutex_unlock(tsdn, &arena->lock); return (NULL); } - extent = iealloc(run); + extent = iealloc(tsdn, run); chunk = (arena_chunk_t *)extent_addr_get(extent); miscelm = arena_run_to_miscelm(run); rpages = arena_miscelm_to_rpages(miscelm); @@ -2555,7 +2560,7 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, arena_miscelm_to_pageind(head_miscelm) + (leadsize >> LG_PAGE)); run = &miscelm->run; - extent = iealloc(run); + extent = iealloc(tsdn, run); arena_run_trim_head(tsdn, arena, chunk, head_extent, head_run, alloc_size, alloc_size - leadsize); @@ -2745,7 +2750,7 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, if (!junked && config_fill && unlikely(opt_junk_free)) arena_dalloc_junk_small(ptr, bin_info); - arena_run_reg_dalloc(run, extent, ptr); + arena_run_reg_dalloc(tsdn, run, extent, ptr); if (run->nfree == bin_info->nregs) { arena_dissociate_bin_run(extent, run, bin); arena_dalloc_bin_run(tsdn, arena, chunk, extent, run, bin); @@ -2793,8 +2798,8 @@ arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, if (config_debug) { /* arena_ptr_small_binind_get() does extra sanity checking. */ - assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, - pageind)) != BININD_INVALID); + assert(arena_ptr_small_binind_get(tsdn, ptr, + arena_mapbits_get(chunk, pageind)) != BININD_INVALID); } bitselm = arena_bitselm_get_mutable(chunk, pageind); arena_dalloc_bin(tsdn, arena, chunk, extent, ptr, pageind, bitselm); @@ -2939,8 +2944,8 @@ arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, goto label_fail; run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run; - if (arena_run_split_large(arena, iealloc(run), run, splitsize, - zero)) + if (arena_run_split_large(arena, iealloc(tsdn, run), run, + splitsize, zero)) goto label_fail; if (config_cache_oblivious && zero) { diff --git a/src/chunk.c b/src/chunk.c index e35bb30a..4443368a 100644 --- a/src/chunk.c +++ b/src/chunk.c @@ -146,8 +146,9 @@ chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent) assert(extent_addr_get(extent) == chunk); - if (rtree_write(&chunks_rtree, (uintptr_t)chunk, extent)) + if (rtree_write(tsdn, &chunks_rtree, (uintptr_t)chunk, extent)) return (true); + if (config_prof && opt_prof) { size_t size = extent_size_get(extent); size_t nadd = (size == 0) ? 1 : size / chunksize; @@ -168,10 +169,10 @@ chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent) } void -chunk_deregister(const void *chunk, const extent_t *extent) +chunk_deregister(tsdn_t *tsdn, const void *chunk, const extent_t *extent) { - rtree_clear(&chunks_rtree, (uintptr_t)chunk); + rtree_clear(tsdn, &chunks_rtree, (uintptr_t)chunk); if (config_prof && opt_prof) { size_t size = extent_size_get(extent); size_t nsub = (size == 0) ? 
1 : size / chunksize; @@ -691,14 +692,6 @@ chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, return (false); } -static rtree_elm_t * -chunks_rtree_node_alloc(size_t nelms) -{ - - return ((rtree_elm_t *)base_alloc(tsdn_fetch(), nelms * - sizeof(rtree_elm_t))); -} - bool chunk_boot(void) { @@ -735,7 +728,7 @@ chunk_boot(void) if (have_dss && chunk_dss_boot()) return (true); if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) - - opt_lg_chunk), chunks_rtree_node_alloc, NULL)) + opt_lg_chunk))) return (true); return (false); diff --git a/src/ckh.c b/src/ckh.c index 3135ee74..2c120ac8 100644 --- a/src/ckh.c +++ b/src/ckh.c @@ -283,12 +283,14 @@ ckh_grow(tsdn_t *tsdn, ckh_t *ckh) ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; if (!ckh_rebuild(ckh, tab)) { - idalloctm(tsdn, iealloc(tab), tab, NULL, true, true); + idalloctm(tsdn, iealloc(tsdn, tab), tab, NULL, true, + true); break; } /* Rebuilding failed, so back out partially rebuilt table. */ - idalloctm(tsdn, iealloc(ckh->tab), ckh->tab, NULL, true, true); + idalloctm(tsdn, iealloc(tsdn, ckh->tab), ckh->tab, NULL, true, + true); ckh->tab = tab; ckh->lg_curbuckets = lg_prevbuckets; } @@ -330,7 +332,7 @@ ckh_shrink(tsdn_t *tsdn, ckh_t *ckh) ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; if (!ckh_rebuild(ckh, tab)) { - idalloctm(tsdn, iealloc(tab), tab, NULL, true, true); + idalloctm(tsdn, iealloc(tsdn, tab), tab, NULL, true, true); #ifdef CKH_COUNT ckh->nshrinks++; #endif @@ -338,7 +340,7 @@ ckh_shrink(tsdn_t *tsdn, ckh_t *ckh) } /* Rebuilding failed, so back out partially rebuilt table. */ - idalloctm(tsdn, iealloc(ckh->tab), ckh->tab, NULL, true, true); + idalloctm(tsdn, iealloc(tsdn, ckh->tab), ckh->tab, NULL, true, true); ckh->tab = tab; ckh->lg_curbuckets = lg_prevbuckets; #ifdef CKH_COUNT @@ -421,7 +423,7 @@ ckh_delete(tsdn_t *tsdn, ckh_t *ckh) (unsigned long long)ckh->nrelocs); #endif - idalloctm(tsdn, iealloc(ckh->tab), ckh->tab, NULL, true, true); + idalloctm(tsdn, iealloc(tsdn, ckh->tab), ckh->tab, NULL, true, true); if (config_debug) memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t)); } diff --git a/src/huge.c b/src/huge.c index e42ea9c1..0b91c369 100644 --- a/src/huge.c +++ b/src/huge.c @@ -45,7 +45,8 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, arena = arena_choose(tsdn_tsd(tsdn), arena); if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn, arena, usize, alignment, &is_zeroed)) == NULL) { - idalloctm(tsdn, iealloc(extent), extent, NULL, true, true); + idalloctm(tsdn, iealloc(tsdn, extent), extent, NULL, true, + true); return (NULL); } @@ -53,7 +54,8 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, if (chunk_register(tsdn, ret, extent)) { arena_chunk_dalloc_huge(tsdn, arena, ret, usize); - idalloctm(tsdn, iealloc(extent), extent, NULL, true, true); + idalloctm(tsdn, iealloc(tsdn, extent), extent, NULL, true, + true); return (NULL); } @@ -194,7 +196,7 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, void *ptr, post_zeroed = pre_zeroed; /* Update the size of the huge allocation. */ - chunk_deregister(ptr, extent); + chunk_deregister(tsdn, ptr, extent); malloc_mutex_lock(tsdn, &arena->huge_mtx); extent_size_set(extent, usize); /* Update zeroed. */ @@ -231,7 +233,7 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, void *ptr, return (true); /* Update the size of the huge allocation. 
*/ - chunk_deregister(ptr, extent); + chunk_deregister(tsdn, ptr, extent); malloc_mutex_lock(tsdn, &arena->huge_mtx); extent_size_set(extent, usize); malloc_mutex_unlock(tsdn, &arena->huge_mtx); @@ -353,7 +355,7 @@ huge_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr) arena_t *arena; arena = extent_arena_get(extent); - chunk_deregister(ptr, extent); + chunk_deregister(tsdn, ptr, extent); malloc_mutex_lock(tsdn, &arena->huge_mtx); ql_remove(&arena->huge, extent, ql_link); malloc_mutex_unlock(tsdn, &arena->huge_mtx); @@ -362,7 +364,7 @@ huge_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr) extent_size_get(extent)); arena_chunk_dalloc_huge(tsdn, extent_arena_get(extent), extent_addr_get(extent), extent_size_get(extent)); - idalloctm(tsdn, iealloc(extent), extent, NULL, true, true); + idalloctm(tsdn, iealloc(tsdn, extent), extent, NULL, true, true); arena_decay_tick(tsdn, arena); } @@ -387,7 +389,7 @@ huge_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr) prof_tctx_t *tctx; arena_t *arena; - assert(extent == iealloc(ptr)); + assert(extent == iealloc(tsdn, ptr)); arena = extent_arena_get(extent); malloc_mutex_lock(tsdn, &arena->huge_mtx); @@ -403,7 +405,7 @@ huge_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr, { arena_t *arena; - assert(extent == iealloc(ptr)); + assert(extent == iealloc(tsdn, ptr)); arena = extent_arena_get(extent); malloc_mutex_lock(tsdn, &arena->huge_mtx); diff --git a/src/jemalloc.c b/src/jemalloc.c index 67a3b564..479d8319 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -325,7 +325,7 @@ void a0dalloc(void *ptr) { - a0idalloc(iealloc(ptr), ptr, true); + a0idalloc(iealloc(NULL, ptr), ptr, true); } /* @@ -365,7 +365,7 @@ bootstrap_free(void *ptr) if (unlikely(ptr == NULL)) return; - a0idalloc(iealloc(ptr), ptr, false); + a0idalloc(iealloc(NULL, ptr), ptr, false); } static void @@ -1401,7 +1401,8 @@ ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero, p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path); if (p == NULL) return (NULL); - arena_prof_promoted(tsd_tsdn(tsd), iealloc(p), p, usize); + arena_prof_promoted(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p, + usize); } else p = ialloc(tsd, usize, ind, zero, slow_path); @@ -1423,7 +1424,7 @@ ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path) prof_alloc_rollback(tsd, tctx, true); return (NULL); } - prof_malloc(tsd_tsdn(tsd), iealloc(p), p, usize, tctx); + prof_malloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p, usize, tctx); return (p); } @@ -1482,7 +1483,8 @@ ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func, set_errno(ENOMEM); } if (config_stats && likely(ret != NULL)) { - assert(usize == isalloc(tsdn, iealloc(ret), ret, config_prof)); + assert(usize == isalloc(tsdn, iealloc(tsdn, ret), ret, + config_prof)); *tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize; } witness_assert_lockless(tsdn); @@ -1525,7 +1527,8 @@ imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize, p = ipalloc(tsd, LARGE_MINCLASS, alignment, false); if (p == NULL) return (NULL); - arena_prof_promoted(tsd_tsdn(tsd), iealloc(p), p, usize); + arena_prof_promoted(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p, + usize); } else p = ipalloc(tsd, usize, alignment, false); @@ -1547,7 +1550,7 @@ imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize) prof_alloc_rollback(tsd, tctx, true); return (NULL); } - prof_malloc(tsd_tsdn(tsd), iealloc(p), p, usize, tctx); + prof_malloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p, usize, tctx); return 
(p); } @@ -1604,8 +1607,8 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) ret = 0; label_return: if (config_stats && likely(result != NULL)) { - assert(usize == isalloc(tsd_tsdn(tsd), iealloc(result), result, - config_prof)); + assert(usize == isalloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), + result), result, config_prof)); *tsd_thread_allocatedp_get(tsd) += usize; } UTRACE(0, size, result); @@ -1696,7 +1699,8 @@ irealloc_prof_sample(tsd_t *tsd, extent_t *extent, void *old_ptr, false); if (p == NULL) return (NULL); - arena_prof_promoted(tsd_tsdn(tsd), iealloc(p), p, usize); + arena_prof_promoted(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p, + usize); } else p = iralloc(tsd, extent, old_ptr, old_usize, usize, 0, false); @@ -1724,7 +1728,7 @@ irealloc_prof(tsd_t *tsd, extent_t *extent, void *old_ptr, size_t old_usize, prof_alloc_rollback(tsd, tctx, true); return (NULL); } - e = (p == old_ptr) ? extent : iealloc(p); + e = (p == old_ptr) ? extent : iealloc(tsd_tsdn(tsd), p); prof_realloc(tsd, e, p, usize, tctx, prof_active, true, old_ptr, old_usize, old_tctx); @@ -1742,7 +1746,7 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); - extent = iealloc(ptr); + extent = iealloc(tsd_tsdn(tsd), ptr); if (config_prof && opt_prof) { usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof); prof_free(tsd, extent, ptr, usize); @@ -1810,9 +1814,8 @@ je_realloc(void *ptr, size_t size) witness_assert_lockless(tsd_tsdn(tsd)); - extent = iealloc(ptr); + extent = iealloc(tsd_tsdn(tsd), ptr); old_usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof); - if (config_prof && opt_prof) { usize = s2u(size); ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ? @@ -1845,7 +1848,8 @@ je_realloc(void *ptr, size_t size) if (config_stats && likely(ret != NULL)) { tsd_t *tsd; - assert(usize == isalloc(tsdn, iealloc(ret), ret, config_prof)); + assert(usize == isalloc(tsdn, iealloc(tsdn, ret), ret, + config_prof)); tsd = tsdn_tsd(tsdn); *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; @@ -1999,7 +2003,7 @@ imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, tcache, arena, slow_path); if (p == NULL) return (NULL); - arena_prof_promoted(tsdn, iealloc(p), p, usize); + arena_prof_promoted(tsdn, iealloc(tsdn, p), p, usize); } else p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena, slow_path); @@ -2033,7 +2037,7 @@ imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path) prof_alloc_rollback(tsd, tctx, true); return (NULL); } - prof_malloc(tsd_tsdn(tsd), iealloc(p), p, *usize, tctx); + prof_malloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p, *usize, tctx); assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); return (p); @@ -2134,7 +2138,7 @@ irallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *old_ptr, alignment, zero, tcache, arena); if (p == NULL) return (NULL); - arena_prof_promoted(tsdn, iealloc(p), p, usize); + arena_prof_promoted(tsdn, iealloc(tsdn, p), p, usize); } else { p = iralloct(tsdn, extent, old_ptr, old_usize, usize, alignment, zero, tcache, arena); @@ -2180,7 +2184,7 @@ irallocx_prof(tsd_t *tsd, extent_t *extent, void *old_ptr, size_t old_usize, e = extent; *usize = isalloc(tsd_tsdn(tsd), e, p, config_prof); } else - e = iealloc(p); + e = iealloc(tsd_tsdn(tsd), p); prof_realloc(tsd, e, p, *usize, tctx, prof_active, true, old_ptr, old_usize, old_tctx); @@ -2207,7 +2211,7 @@ 
je_rallocx(void *ptr, size_t size, int flags) assert(malloc_initialized() || IS_INITIALIZER); tsd = tsd_fetch(); witness_assert_lockless(tsd_tsdn(tsd)); - extent = iealloc(ptr); + extent = iealloc(tsd_tsdn(tsd), ptr); if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { unsigned arena_ind = MALLOCX_ARENA_GET(flags); @@ -2241,8 +2245,8 @@ je_rallocx(void *ptr, size_t size, int flags) if (unlikely(p == NULL)) goto label_oom; if (config_stats) { - usize = isalloc(tsd_tsdn(tsd), iealloc(p), p, - config_prof); + usize = isalloc(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), + p), p, config_prof); } } assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); @@ -2357,7 +2361,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) assert(malloc_initialized() || IS_INITIALIZER); tsd = tsd_fetch(); witness_assert_lockless(tsd_tsdn(tsd)); - extent = iealloc(ptr); + extent = iealloc(tsd_tsdn(tsd), ptr); old_usize = isalloc(tsd_tsdn(tsd), extent, ptr, config_prof); @@ -2412,7 +2416,7 @@ je_sallocx(const void *ptr, int flags) if (config_ivsalloc) usize = ivsalloc(tsdn, ptr, config_prof); else - usize = isalloc(tsdn, iealloc(ptr), ptr, config_prof); + usize = isalloc(tsdn, iealloc(tsdn, ptr), ptr, config_prof); witness_assert_lockless(tsdn); return (usize); @@ -2471,7 +2475,7 @@ je_sdallocx(void *ptr, size_t size, int flags) assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); tsd = tsd_fetch(); - extent = iealloc(ptr); + extent = iealloc(tsd_tsdn(tsd), ptr); usize = inallocx(tsd_tsdn(tsd), size, flags); assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr, config_prof)); @@ -2591,7 +2595,7 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) if (config_ivsalloc) ret = ivsalloc(tsdn, ptr, config_prof); else { - ret = (ptr == NULL) ? 0 : isalloc(tsdn, iealloc(ptr), ptr, + ret = (ptr == NULL) ? 0 : isalloc(tsdn, iealloc(tsdn, ptr), ptr, config_prof); } diff --git a/src/prof.c b/src/prof.c index 121dcd91..03979ca3 100644 --- a/src/prof.c +++ b/src/prof.c @@ -596,7 +596,8 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, prof_leave(tsd, tdata_self); /* Destroy gctx. */ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); - idalloctm(tsd_tsdn(tsd), iealloc(gctx), gctx, NULL, true, true); + idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), gctx), gctx, + NULL, true, true); } else { /* * Compensate for increment in prof_tctx_destroy() or @@ -707,7 +708,8 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) prof_tdata_destroy(tsd_tsdn(tsd), tdata, false); if (destroy_tctx) - idalloctm(tsd_tsdn(tsd), iealloc(tctx), tctx, NULL, true, true); + idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), tctx), tctx, + NULL, true, true); } static bool @@ -736,8 +738,8 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, if (ckh_insert(tsd_tsdn(tsd), &bt2gctx, btkey.v, gctx.v)) { /* OOM. 
*/ prof_leave(tsd, tdata); - idalloctm(tsd_tsdn(tsd), iealloc(gctx.v), gctx.v, NULL, - true, true); + idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), gctx.v), + gctx.v, NULL, true, true); return (true); } new_gctx = true; @@ -817,8 +819,8 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) if (error) { if (new_gctx) prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - idalloctm(tsd_tsdn(tsd), iealloc(ret.v), ret.v, NULL, - true, true); + idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), ret.v), + ret.v, NULL, true, true); return (NULL); } malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); @@ -1241,8 +1243,8 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) tctx_tree_remove(&gctx->tctxs, to_destroy); idalloctm(tsd_tsdn(tsd), - iealloc(to_destroy), to_destroy, - NULL, true, true); + iealloc(tsd_tsdn(tsd), to_destroy), + to_destroy, NULL, true, true); } else next = NULL; } while (next != NULL); @@ -1818,7 +1820,7 @@ prof_tdata_init_impl(tsdn_t *tsdn, uint64_t thr_uid, uint64_t thr_discrim, if (ckh_new(tsdn, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, prof_bt_keycomp)) { - idalloctm(tsdn, iealloc(tdata), tdata, NULL, true, true); + idalloctm(tsdn, iealloc(tsdn, tdata), tdata, NULL, true, true); return (NULL); } @@ -1882,11 +1884,11 @@ prof_tdata_destroy_locked(tsdn_t *tsdn, prof_tdata_t *tdata, assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); if (tdata->thread_name != NULL) { - idalloctm(tsdn, iealloc(tdata->thread_name), tdata->thread_name, - NULL, true, true); + idalloctm(tsdn, iealloc(tsdn, tdata->thread_name), + tdata->thread_name, NULL, true, true); } ckh_delete(tsdn, &tdata->bt2tctx); - idalloctm(tsdn, iealloc(tdata), tdata, NULL, true, true); + idalloctm(tsdn, iealloc(tsdn, tdata), tdata, NULL, true, true); } static void @@ -2080,8 +2082,8 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name) return (EAGAIN); if (tdata->thread_name != NULL) { - idalloctm(tsd_tsdn(tsd), iealloc(tdata->thread_name), - tdata->thread_name, NULL, true, true); + idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), + tdata->thread_name), tdata->thread_name, NULL, true, true); tdata->thread_name = NULL; } if (strlen(s) > 0) diff --git a/src/rtree.c b/src/rtree.c index 71c69c41..c6b64cf4 100644 --- a/src/rtree.c +++ b/src/rtree.c @@ -13,8 +13,7 @@ hmin(unsigned ha, unsigned hb) * used. */ bool -rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, - rtree_node_dalloc_t *dalloc) +rtree_new(rtree_t *rtree, unsigned bits) { unsigned bits_in_leaf, height, i; @@ -32,8 +31,6 @@ rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, height = 1; assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits); - rtree->alloc = alloc; - rtree->dalloc = dalloc; rtree->height = height; /* Root level. 
*/ @@ -64,8 +61,43 @@ rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, return (false); } +#ifdef JEMALLOC_JET +#undef rtree_node_alloc +#define rtree_node_alloc JEMALLOC_N(rtree_node_alloc_impl) +#endif +static rtree_elm_t * +rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) +{ + + return ((rtree_elm_t *)base_alloc(tsdn, nelms * sizeof(rtree_elm_t))); +} +#ifdef JEMALLOC_JET +#undef rtree_node_alloc +#define rtree_node_alloc JEMALLOC_N(rtree_node_alloc) +rtree_node_alloc_t *rtree_node_alloc = JEMALLOC_N(rtree_node_alloc_impl); +#endif + +#ifdef JEMALLOC_JET +#undef rtree_node_dalloc +#define rtree_node_dalloc JEMALLOC_N(rtree_node_dalloc_impl) +#endif +UNUSED static void +rtree_node_dalloc(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node) +{ + + /* Nodes are never deleted during normal operation. */ + not_reached(); +} +#ifdef JEMALLOC_JET +#undef rtree_node_dalloc +#define rtree_node_dalloc JEMALLOC_N(rtree_node_dalloc) +rtree_node_dalloc_t *rtree_node_dalloc = JEMALLOC_N(rtree_node_dalloc_impl); +#endif + +#ifdef JEMALLOC_JET static void -rtree_delete_subtree(rtree_t *rtree, rtree_elm_t *node, unsigned level) +rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node, + unsigned level) { if (level + 1 < rtree->height) { @@ -74,27 +106,31 @@ rtree_delete_subtree(rtree_t *rtree, rtree_elm_t *node, unsigned level) nchildren = ZU(1) << rtree->levels[level].bits; for (i = 0; i < nchildren; i++) { rtree_elm_t *child = node[i].child; - if (child != NULL) - rtree_delete_subtree(rtree, child, level + 1); + if (child != NULL) { + rtree_delete_subtree(tsdn, rtree, child, level + + 1); + } } } - rtree->dalloc(node); + rtree_node_dalloc(tsdn, rtree, node); } void -rtree_delete(rtree_t *rtree) +rtree_delete(tsdn_t *tsdn, rtree_t *rtree) { unsigned i; for (i = 0; i < rtree->height; i++) { rtree_elm_t *subtree = rtree->levels[i].subtree; if (subtree != NULL) - rtree_delete_subtree(rtree, subtree, i); + rtree_delete_subtree(tsdn, rtree, subtree, i); } } +#endif static rtree_elm_t * -rtree_node_init(rtree_t *rtree, unsigned level, rtree_elm_t **elmp) +rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level, + rtree_elm_t **elmp) { rtree_elm_t *node; @@ -108,7 +144,8 @@ rtree_node_init(rtree_t *rtree, unsigned level, rtree_elm_t **elmp) node = atomic_read_p((void **)elmp); } while (node == RTREE_NODE_INITIALIZING); } else { - node = rtree->alloc(ZU(1) << rtree->levels[level].bits); + node = rtree_node_alloc(tsdn, rtree, ZU(1) << + rtree->levels[level].bits); if (node == NULL) return (NULL); atomic_write_p((void **)elmp, node); @@ -118,15 +155,17 @@ rtree_node_init(rtree_t *rtree, unsigned level, rtree_elm_t **elmp) } rtree_elm_t * -rtree_subtree_read_hard(rtree_t *rtree, unsigned level) +rtree_subtree_read_hard(tsdn_t *tsdn, rtree_t *rtree, unsigned level) { - return (rtree_node_init(rtree, level, &rtree->levels[level].subtree)); + return (rtree_node_init(tsdn, rtree, level, + &rtree->levels[level].subtree)); } rtree_elm_t * -rtree_child_read_hard(rtree_t *rtree, rtree_elm_t *elm, unsigned level) +rtree_child_read_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *elm, + unsigned level) { - return (rtree_node_init(rtree, level, &elm->child)); + return (rtree_node_init(tsdn, rtree, level, &elm->child)); } diff --git a/src/tcache.c b/src/tcache.c index c02f0f0c..8bd8df01 100644 --- a/src/tcache.c +++ b/src/tcache.c @@ -27,7 +27,7 @@ size_t tcache_salloc(tsdn_t *tsdn, const void *ptr) { - return (arena_salloc(tsdn, iealloc(ptr), ptr, false)); + return 
(arena_salloc(tsdn, iealloc(tsdn, ptr), ptr, false)); } void @@ -101,7 +101,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, assert(arena != NULL); for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { /* Lock the arena bin associated with the first object. */ - extent_t *extent = iealloc(*(tbin->avail - 1)); + extent_t *extent = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1)); arena_t *bin_arena = extent_arena_get(extent); arena_bin_t *bin = &bin_arena->bins[binind]; @@ -125,7 +125,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, ptr = *(tbin->avail - 1 - i); assert(ptr != NULL); - extent = iealloc(ptr); + extent = iealloc(tsd_tsdn(tsd), ptr); if (extent_arena_get(extent) == bin_arena) { arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent); @@ -185,7 +185,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, assert(arena != NULL); for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { /* Lock the arena associated with the first object. */ - extent_t *extent = iealloc(*(tbin->avail - 1)); + extent_t *extent = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1)); arena_t *locked_arena = extent_arena_get(extent); UNUSED bool idump; @@ -211,7 +211,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, for (i = 0; i < nflush; i++) { ptr = *(tbin->avail - 1 - i); assert(ptr != NULL); - extent = iealloc(ptr); + extent = iealloc(tsd_tsdn(tsd), ptr); if (extent_arena_get(extent) == locked_arena) { arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent); @@ -394,7 +394,8 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache) arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes)) prof_idump(tsd_tsdn(tsd)); - idalloctm(tsd_tsdn(tsd), iealloc(tcache), tcache, NULL, true, true); + idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), tcache), tcache, NULL, + true, true); } void diff --git a/test/unit/rtree.c b/test/unit/rtree.c index 671e2c8a..9c992e11 100644 --- a/test/unit/rtree.c +++ b/test/unit/rtree.c @@ -1,10 +1,18 @@ #include "test/jemalloc_test.h" +rtree_node_alloc_t *rtree_node_alloc_orig; +rtree_node_dalloc_t *rtree_node_dalloc_orig; + +rtree_t *test_rtree; + static rtree_elm_t * -node_alloc(size_t nelms) +rtree_node_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { rtree_elm_t *node; + if (rtree != test_rtree) + return rtree_node_alloc_orig(tsdn, rtree, nelms); + node = (rtree_elm_t *)calloc(nelms, sizeof(rtree_elm_t)); assert_ptr_not_null(node, "Unexpected calloc() failure"); @@ -12,23 +20,33 @@ node_alloc(size_t nelms) } static void -node_dalloc(rtree_elm_t *node) +rtree_node_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *node) { + if (rtree != test_rtree) { + rtree_node_dalloc_orig(tsdn, rtree, node); + return; + } + free(node); } TEST_BEGIN(test_rtree_read_empty) { + tsdn_t *tsdn; unsigned i; + tsdn = tsdn_fetch(); + for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { rtree_t rtree; - assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), + test_rtree = &rtree; + assert_false(rtree_new(&rtree, i), "Unexpected rtree_new() failure"); - assert_ptr_null(rtree_read(&rtree, 0, false), + assert_ptr_null(rtree_read(tsdn, &rtree, 0, false), "rtree_read() should return NULL for empty tree"); - rtree_delete(&rtree); + rtree_delete(tsdn, &rtree); + test_rtree = NULL; } } TEST_END @@ -50,30 +68,34 @@ thd_start(void *varg) thd_start_arg_t *arg = (thd_start_arg_t *)varg; sfmt_t *sfmt; extent_t *extent; + tsdn_t *tsdn; unsigned i; sfmt = 
init_gen_rand(arg->seed); extent = (extent_t *)malloc(sizeof(extent)); assert_ptr_not_null(extent, "Unexpected malloc() failure"); + tsdn = tsdn_fetch(); for (i = 0; i < NITERS; i++) { uintptr_t key = (uintptr_t)gen_rand64(sfmt); if (i % 2 == 0) { rtree_elm_t *elm; - elm = rtree_elm_acquire(&arg->rtree, key, false, true); + elm = rtree_elm_acquire(tsdn, &arg->rtree, key, false, + true); assert_ptr_not_null(elm, "Unexpected rtree_elm_acquire() failure"); rtree_elm_write_acquired(elm, extent); rtree_elm_release(elm); - elm = rtree_elm_acquire(&arg->rtree, key, true, false); + elm = rtree_elm_acquire(tsdn, &arg->rtree, key, true, + false); assert_ptr_not_null(elm, "Unexpected rtree_elm_acquire() failure"); rtree_elm_read_acquired(elm); rtree_elm_release(elm); } else - rtree_read(&arg->rtree, key, false); + rtree_read(tsdn, &arg->rtree, key, false); } free(extent); @@ -86,19 +108,23 @@ TEST_BEGIN(test_rtree_concurrent) thd_start_arg_t arg; thd_t thds[NTHREADS]; sfmt_t *sfmt; + tsdn_t *tsdn; unsigned i, j; sfmt = init_gen_rand(SEED); + tsdn = tsdn_fetch(); for (i = 1; i < MAX_NBITS; i++) { arg.nbits = i; - assert_false(rtree_new(&arg.rtree, arg.nbits, node_alloc, - node_dalloc), "Unexpected rtree_new() failure"); + test_rtree = &arg.rtree; + assert_false(rtree_new(&arg.rtree, arg.nbits), + "Unexpected rtree_new() failure"); arg.seed = gen_rand32(sfmt); for (j = 0; j < NTHREADS; j++) thd_create(&thds[j], thd_start, (void *)&arg); for (j = 0; j < NTHREADS; j++) thd_join(thds[j], NULL); - rtree_delete(&arg.rtree); + rtree_delete(tsdn, &arg.rtree); + test_rtree = NULL; } fini_gen_rand(sfmt); } @@ -113,60 +139,70 @@ TEST_BEGIN(test_rtree_extrema) { unsigned i; extent_t extent_a, extent_b; + tsdn_t *tsdn; + + tsdn = tsdn_fetch(); for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { rtree_t rtree; - assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), + test_rtree = &rtree; + assert_false(rtree_new(&rtree, i), "Unexpected rtree_new() failure"); - assert_false(rtree_write(&rtree, 0, &extent_a), + assert_false(rtree_write(tsdn, &rtree, 0, &extent_a), "Unexpected rtree_write() failure, i=%u", i); - assert_ptr_eq(rtree_read(&rtree, 0, true), &extent_a, + assert_ptr_eq(rtree_read(tsdn, &rtree, 0, true), &extent_a, "rtree_read() should return previously set value, i=%u", i); - assert_false(rtree_write(&rtree, ~((uintptr_t)0), &extent_b), - "Unexpected rtree_write() failure, i=%u", i); - assert_ptr_eq(rtree_read(&rtree, ~((uintptr_t)0), true), + assert_false(rtree_write(tsdn, &rtree, ~((uintptr_t)0), + &extent_b), "Unexpected rtree_write() failure, i=%u", i); + assert_ptr_eq(rtree_read(tsdn, &rtree, ~((uintptr_t)0), true), &extent_b, "rtree_read() should return previously set value, i=%u", i); - rtree_delete(&rtree); + rtree_delete(tsdn, &rtree); + test_rtree = NULL; } } TEST_END TEST_BEGIN(test_rtree_bits) { + tsdn_t *tsdn; unsigned i, j, k; + tsdn = tsdn_fetch(); + for (i = 1; i < (sizeof(uintptr_t) << 3); i++) { uintptr_t keys[] = {0, 1, (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1}; extent_t extent; rtree_t rtree; - assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), + test_rtree = &rtree; + assert_false(rtree_new(&rtree, i), "Unexpected rtree_new() failure"); for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) { - assert_false(rtree_write(&rtree, keys[j], &extent), - "Unexpected rtree_write() failure"); + assert_false(rtree_write(tsdn, &rtree, keys[j], + &extent), "Unexpected rtree_write() failure"); for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) { - 
assert_ptr_eq(rtree_read(&rtree, keys[k], true), - &extent, "rtree_read() should return " - "previously set value and ignore " + assert_ptr_eq(rtree_read(tsdn, &rtree, keys[k], + true), &extent, "rtree_read() should " + "return previously set value and ignore " "insignificant key bits; i=%u, j=%u, k=%u, " "set key=%#"FMTxPTR", get key=%#"FMTxPTR, i, j, k, keys[j], keys[k]); } - assert_ptr_null(rtree_read(&rtree, + assert_ptr_null(rtree_read(tsdn, &rtree, (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)), false), "Only leftmost rtree leaf should be set; " "i=%u, j=%u", i, j); - rtree_clear(&rtree, keys[j]); + rtree_clear(tsdn, &rtree, keys[j]); } - rtree_delete(&rtree); + rtree_delete(tsdn, &rtree); + test_rtree = NULL; } } TEST_END @@ -175,10 +211,12 @@ TEST_BEGIN(test_rtree_random) { unsigned i; sfmt_t *sfmt; + tsdn_t *tsdn; #define NSET 16 #define SEED 42 sfmt = init_gen_rand(SEED); + tsdn = tsdn_fetch(); for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { uintptr_t keys[NSET]; extent_t extent; @@ -186,37 +224,40 @@ TEST_BEGIN(test_rtree_random) rtree_t rtree; rtree_elm_t *elm; - assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), + test_rtree = &rtree; + assert_false(rtree_new(&rtree, i), "Unexpected rtree_new() failure"); for (j = 0; j < NSET; j++) { keys[j] = (uintptr_t)gen_rand64(sfmt); - elm = rtree_elm_acquire(&rtree, keys[j], false, true); + elm = rtree_elm_acquire(tsdn, &rtree, keys[j], false, + true); assert_ptr_not_null(elm, "Unexpected rtree_elm_acquire() failure"); rtree_elm_write_acquired(elm, &extent); rtree_elm_release(elm); - assert_ptr_eq(rtree_read(&rtree, keys[j], true), + assert_ptr_eq(rtree_read(tsdn, &rtree, keys[j], true), &extent, "rtree_read() should return previously set value"); } for (j = 0; j < NSET; j++) { - assert_ptr_eq(rtree_read(&rtree, keys[j], true), + assert_ptr_eq(rtree_read(tsdn, &rtree, keys[j], true), &extent, "rtree_read() should return previously " "set value, j=%u", j); } for (j = 0; j < NSET; j++) { - rtree_clear(&rtree, keys[j]); - assert_ptr_null(rtree_read(&rtree, keys[j], true), + rtree_clear(tsdn, &rtree, keys[j]); + assert_ptr_null(rtree_read(tsdn, &rtree, keys[j], true), "rtree_read() should return previously set value"); } for (j = 0; j < NSET; j++) { - assert_ptr_null(rtree_read(&rtree, keys[j], true), + assert_ptr_null(rtree_read(tsdn, &rtree, keys[j], true), "rtree_read() should return previously set value"); } - rtree_delete(&rtree); + rtree_delete(tsdn, &rtree); + test_rtree = NULL; } fini_gen_rand(sfmt); #undef NSET @@ -228,6 +269,12 @@ int main(void) { + rtree_node_alloc_orig = rtree_node_alloc; + rtree_node_alloc = rtree_node_alloc_intercept; + rtree_node_dalloc_orig = rtree_node_dalloc; + rtree_node_dalloc = rtree_node_dalloc_intercept; + test_rtree = NULL; + return (test( test_rtree_read_empty, test_rtree_concurrent,
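For reference, the net effect of this refactor on callers: rtree_new() no longer takes rtree_node_alloc_t/rtree_node_dalloc_t hooks, node allocation always goes through the internal rtree_node_alloc() (backed by base_alloc()), and every rtree operation now threads a tsdn_t through so that allocation can reach the base allocator. Below is a minimal sketch of the resulting call pattern, assuming the jemalloc internal headers; chunk_track() and chunk_find() are hypothetical wrapper names that mirror the chunk_register()/chunk_lookup() changes shown in the diff above, not functions added by this patch.

/*
 * Minimal sketch of the post-refactor call pattern (illustrative only).
 * chunk_track()/chunk_find() are hypothetical wrappers mirroring
 * chunk_register()/chunk_lookup() from the diff above.
 */
#include "jemalloc/internal/jemalloc_internal.h"

static rtree_t chunks_rtree;

static bool
chunks_rtree_boot(void)
{
	/* No alloc/dalloc hooks anymore; nodes come from base_alloc(). */
	return (rtree_new(&chunks_rtree, (unsigned)((ZU(1) <<
	    (LG_SIZEOF_PTR+3)) - opt_lg_chunk)));
}

static bool
chunk_track(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
{
	/* Missing interior nodes are allocated via base_alloc(tsdn, ...). */
	return (rtree_write(tsdn, &chunks_rtree, (uintptr_t)chunk, extent));
}

static extent_t *
chunk_find(tsdn_t *tsdn, const void *ptr, bool dependent)
{
	return (rtree_read(tsdn, &chunks_rtree, (uintptr_t)ptr, dependent));
}

Under JEMALLOC_JET the allocator remains replaceable: rtree_node_alloc/rtree_node_dalloc become function pointers, which is how test/unit/rtree.c intercepts node allocation for the rtrees it creates itself (tracked via test_rtree) while any other rtree still uses base_alloc().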